Merge branch 'master' into gh-pages

This commit is contained in:
Felix Kunde 2024-12-23 13:06:19 +01:00
commit 88491c658a
87 changed files with 1294 additions and 571 deletions

View File

@ -23,7 +23,7 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "^1.22.5"
go-version: "^1.23.4"
- name: Run unit tests
run: make deps mocks test

View File

@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-go@v2
with:
go-version: "^1.22.5"
go-version: "^1.23.4"
- name: Make dependencies
run: make deps mocks
- name: Code generation

View File

@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: "^1.22.5"
go-version: "^1.23.4"
- name: Make dependencies
run: make deps mocks
- name: Compile
@ -22,7 +22,7 @@ jobs:
- name: Run unit tests
run: go test -race -covermode atomic -coverprofile=coverage.out ./...
- name: Convert coverage to lcov
uses: jandelgado/gcov2lcov-action@v1.0.9
uses: jandelgado/gcov2lcov-action@v1.1.1
- name: Coveralls
uses: coverallsapp/github-action@master
with:

2
.gitignore vendored
View File

@ -104,3 +104,5 @@ e2e/tls
mocks
ui/.npm/
.DS_Store

View File

@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .
indocker-race:
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.5 bash -c "make linux"
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux"
push:
docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
@ -78,7 +78,7 @@ mocks:
GO111MODULE=on go generate ./...
tools:
GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.28.12
GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4
GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
GO111MODULE=on go mod tidy

View File

@ -28,13 +28,13 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
### PostgreSQL features
* Supports PostgreSQL 16, starting from 12+
* Supports PostgreSQL 17, starting from 13+
* Streaming replication cluster via Patroni
* Point-In-Time-Recovery with
[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) /
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) /
[WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
* Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
[pg_stat_statements](https://www.postgresql.org/docs/16/pgstatstatements.html),
[pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html),
[pgextwlist](https://github.com/dimitri/pgextwlist),
[pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
* Incl. popular Postgres extensions such as
@ -57,12 +57,12 @@ production for over five years.
| Release | Postgres versions | K8s versions | Golang |
| :-------- | :---------------: | :---------------: | :-----: |
| v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 |
| v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 |
| v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 |
| v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 |
| v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 |
| v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 |
| v1.8.2 | 9.5 → 14 | 1.20 → 1.24 | 1.17.4 |
## Getting started

View File

@ -1,7 +1,7 @@
apiVersion: v2
name: postgres-operator-ui
version: 1.13.0
appVersion: 1.13.0
version: 1.14.0
appVersion: 1.14.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:

View File

@ -1,9 +1,32 @@
apiVersion: v1
entries:
postgres-operator-ui:
- apiVersion: v2
appVersion: 1.14.0
created: "2024-12-23T11:26:07.721761867+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
name: postgres-operator-ui
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-ui-1.14.0.tgz
version: 1.14.0
- apiVersion: v2
appVersion: 1.13.0
created: "2024-08-21T18:55:36.524305158+02:00"
created: "2024-12-23T11:26:07.719409282+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8
@ -26,7 +49,7 @@ entries:
version: 1.13.0
- apiVersion: v2
appVersion: 1.12.2
created: "2024-08-21T18:55:36.521875733+02:00"
created: "2024-12-23T11:26:07.717202918+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd
@ -49,7 +72,7 @@ entries:
version: 1.12.2
- apiVersion: v2
appVersion: 1.11.0
created: "2024-08-21T18:55:36.51959105+02:00"
created: "2024-12-23T11:26:07.714792146+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2
@ -72,7 +95,7 @@ entries:
version: 1.11.0
- apiVersion: v2
appVersion: 1.10.1
created: "2024-08-21T18:55:36.516518177+02:00"
created: "2024-12-23T11:26:07.712194397+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce
@ -95,7 +118,7 @@ entries:
version: 1.10.1
- apiVersion: v2
appVersion: 1.9.0
created: "2024-08-21T18:55:36.52712908+02:00"
created: "2024-12-23T11:26:07.723891496+01:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
@ -116,4 +139,4 @@ entries:
urls:
- postgres-operator-ui-1.9.0.tgz
version: 1.9.0
generated: "2024-08-21T18:55:36.512456099+02:00"
generated: "2024-12-23T11:26:07.709192608+01:00"

View File

@ -9,7 +9,7 @@ metadata:
name: {{ template "postgres-operator-ui.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
@ -84,11 +84,11 @@ spec:
"limit_iops": 16000,
"limit_throughput": 1000,
"postgresql_versions": [
"17",
"16",
"15",
"14",
"13",
"12"
"13"
]
}
{{- if .Values.extraEnvs }}
@ -102,4 +102,4 @@ spec:
{{ toYaml .Values.tolerations | indent 8 }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- end }}

View File

@ -8,7 +8,7 @@ replicaCount: 1
image:
registry: ghcr.io
repository: zalando/postgres-operator-ui
tag: v1.13.0
tag: v1.14.0
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.

View File

@ -1,7 +1,7 @@
apiVersion: v2
name: postgres-operator
version: 1.13.0
appVersion: 1.13.0
version: 1.14.0
appVersion: 1.14.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
keywords:

View File

@ -68,7 +68,7 @@ spec:
type: string
docker_image:
type: string
default: "ghcr.io/zalando/spilo-16:3.3-p1"
default: "ghcr.io/zalando/spilo-17:4.0-p2"
enable_crd_registration:
type: boolean
default: true
@ -167,10 +167,10 @@ spec:
type: string
minimal_major_version:
type: string
default: "12"
default: "13"
target_major_version:
type: string
default: "16"
default: "17"
kubernetes:
type: object
properties:
@ -376,28 +376,28 @@ spec:
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
max_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
max_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
timeouts:
type: object
properties:

View File

@ -375,11 +375,11 @@ spec:
version:
type: string
enum:
- "12"
- "13"
- "14"
- "15"
- "16"
- "17"
parameters:
type: object
additionalProperties:
@ -514,6 +514,9 @@ spec:
type: string
batchSize:
type: integer
cpu:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
database:
type: string
enableRecovery:
@ -522,6 +525,9 @@ spec:
type: object
additionalProperties:
type: string
memory:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
tables:
type: object
additionalProperties:
@ -533,6 +539,8 @@ spec:
type: string
idColumn:
type: string
ignoreRecovery:
type: boolean
payloadColumn:
type: string
recoveryEventType:

View File

@ -1,9 +1,31 @@
apiVersion: v1
entries:
postgres-operator:
- apiVersion: v2
appVersion: 1.14.0
created: "2024-12-23T11:25:32.596716566+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
name: postgres-operator
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-1.14.0.tgz
version: 1.14.0
- apiVersion: v2
appVersion: 1.13.0
created: "2024-08-21T18:54:43.160735116+02:00"
created: "2024-12-23T11:25:32.591136261+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef
@ -25,7 +47,7 @@ entries:
version: 1.13.0
- apiVersion: v2
appVersion: 1.12.2
created: "2024-08-21T18:54:43.152249286+02:00"
created: "2024-12-23T11:25:32.585419709+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8
@ -47,7 +69,7 @@ entries:
version: 1.12.2
- apiVersion: v2
appVersion: 1.11.0
created: "2024-08-21T18:54:43.145837894+02:00"
created: "2024-12-23T11:25:32.580077286+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9
@ -69,7 +91,7 @@ entries:
version: 1.11.0
- apiVersion: v2
appVersion: 1.10.1
created: "2024-08-21T18:54:43.139552116+02:00"
created: "2024-12-23T11:25:32.574641578+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c
@ -91,7 +113,7 @@ entries:
version: 1.10.1
- apiVersion: v2
appVersion: 1.9.0
created: "2024-08-21T18:54:43.168490032+02:00"
created: "2024-12-23T11:25:32.604748814+01:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
@ -111,4 +133,4 @@ entries:
urls:
- postgres-operator-1.9.0.tgz
version: 1.9.0
generated: "2024-08-21T18:54:43.126871802+02:00"
generated: "2024-12-23T11:25:32.568598763+01:00"

Binary file not shown.

View File

@ -140,8 +140,8 @@ rules:
- delete
- get
- list
{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
- patch
{{- if or (toString .Values.configKubernetes.storage_resize_mode | eq "pvc") (toString .Values.configKubernetes.storage_resize_mode | eq "mixed") }}
- update
{{- end }}
# to read existing PVs. Creation should be done via dynamic provisioning

View File

@ -54,7 +54,7 @@ spec:
value: {{ template "postgres-operator.controllerID" . }}
{{- end }}
{{- if .Values.extraEnvs }}
{{- .Values.extraEnvs | toYaml | nindent 12 }}
{{ toYaml .Values.extraEnvs | indent 8 }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10 }}

View File

@ -1,7 +1,7 @@
image:
registry: ghcr.io
repository: zalando/postgres-operator
tag: v1.13.0
tag: v1.14.0
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.
@ -38,7 +38,7 @@ configGeneral:
# etcd connection string for Patroni. Empty uses K8s-native DCS.
etcd_host: ""
# Spilo docker image
docker_image: ghcr.io/zalando/spilo-16:3.3-p1
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
# key name for annotation to ignore globally configured instance limits
# ignore_instance_limits_annotation_key: ""
@ -89,9 +89,9 @@ configMajorVersionUpgrade:
# - acid
# minimal Postgres major version that will not automatically be upgraded
minimal_major_version: "12"
minimal_major_version: "13"
# target Postgres major version when upgrading clusters automatically
target_major_version: "16"
target_major_version: "17"
configKubernetes:
# list of additional capabilities for postgres container

View File

@ -35,6 +35,8 @@ func init() {
flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- or outside of the Kubernetes cluster.")
flag.BoolVar(&config.NoDatabaseAccess, "nodatabaseaccess", false, "Disable all access to the database from the operator side.")
flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
flag.IntVar(&config.KubeQPS, "kubeqps", 10, "Kubernetes api requests per second.")
flag.IntVar(&config.KubeBurst, "kubeburst", 20, "Kubernetes api requests burst limit.")
flag.Parse()
config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
@ -83,6 +85,9 @@ func main() {
log.Fatalf("couldn't get REST config: %v", err)
}
config.RestConfig.QPS = float32(config.KubeQPS)
config.RestConfig.Burst = config.KubeBurst
c := controller.NewController(&config, "")
c.Run(stop, wg)
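
For context, a minimal sketch of how these new client-side rate limits could be raised when deploying the operator; the flag names match the ones added above, while the Deployment layout and values are illustrative assumptions:

```yaml
# Hypothetical Deployment excerpt: passing the new flags as container args.
# Defaults are -kubeqps=10 and -kubeburst=20; the values below are examples only.
containers:
  - name: postgres-operator
    image: ghcr.io/zalando/postgres-operator:v1.14.0
    args:
      - "-kubeqps=20"
      - "-kubeburst=40"
```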

View File

@ -1,4 +1,4 @@
FROM golang:1.22-alpine
FROM golang:1.23-alpine
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
# We need root certificates to deal with teams api over https

View File

@ -1,5 +1,5 @@
ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
FROM golang:1.22-alpine AS builder
FROM golang:1.23-alpine AS builder
ARG VERSION=latest
COPY . /go/src/github.com/zalando/postgres-operator

View File

@ -13,7 +13,7 @@ apt-get install -y wget
(
cd /tmp
wget -q "https://storage.googleapis.com/golang/go1.22.5.linux-${arch}.tar.gz" -O go.tar.gz
wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz
tar -xf go.tar.gz
mv go /usr/local
ln -s /usr/local/go/bin/go /usr/bin/go

View File

@ -63,14 +63,17 @@ the `PGVERSION` environment variable is set for the database pods. Since
`v1.6.0` the related option `enable_pgversion_env_var` is enabled by default.
In-place major version upgrades can be configured to be executed by the
operator with the `major_version_upgrade_mode` option. By default it is set
to `off` which means the cluster version will not change when increased in
the manifest. Still, a rolling update would be triggered updating the
`PGVERSION` variable. But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
script will notice the version mismatch and start the old version again.
operator with the `major_version_upgrade_mode` option. By default, it is
enabled (mode: `manual`). In any case, altering the version in the manifest
will trigger a rolling update of pods to update the `PGVERSION` env variable.
Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
script will notice the version mismatch but start the current version again.
In this scenario the major version could then be run by a user from within the
primary pod. Exec into the container and run:
Next, the operator would call an upgrade script inside Spilo. When automatic
upgrades are disabled (mode: `off`) the upgrade can still be run by a user
from within the primary pod. This gives you full control over the point in
time when the upgrade is started (see also maintenance windows below).
Exec into the container and run:
```bash
python3 /scripts/inplace_upgrade.py N
```
@ -79,11 +82,32 @@ The upgrade is usually fast, well under one minute for most DBs. Note that
changes become irreversible once `pg_upgrade` is called. To understand the
upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
When `major_version_upgrade_mode` is set to `manual` the operator will run
the upgrade script for you after the manifest is updated and pods are rotated.
It is also possible to define `maintenanceWindows` in the Postgres manifest to
better control when such automated upgrades should take place after increasing
the version.
When `major_version_upgrade_mode` is set to `full` the operator will compare
the version in the manifest with the configured `minimal_major_version`. If it
is lower the operator would start an automatic upgrade as described above. The
configured `target_major_version` will be used as the new version. This option
can be useful if you have to get rid of outdated major versions in your fleet.
Please note, that the operator does not patch the version in the manifest.
Thus, the `full` mode can create drift between desired and actual state.
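
For illustration, a sketch of the matching Helm values using the chart's `configMajorVersionUpgrade` group shown earlier in this diff; the mode key and the `full` value are assumptions made for the example, not part of this change:

```yaml
# Illustrative values.yaml excerpt for fleet-wide automated upgrades.
configMajorVersionUpgrade:
  major_version_upgrade_mode: "full"  # "off", "manual" or "full"
  minimal_major_version: "13"         # clusters below this version get upgraded
  target_major_version: "17"          # version such clusters are upgraded to
```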
### Upgrade during maintenance windows
When `maintenanceWindows` are defined in the Postgres manifest the operator
will trigger a major version upgrade only during these periods. Make sure they
are at least twice as long as your configured `resync_period` to guarantee
that operator actions can be triggered.
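
As a sketch, a cluster manifest excerpt that requests Postgres 17 but confines the automated upgrade to a maintenance window; the cluster name and window are illustrative:

```yaml
# Illustrative manifest excerpt: the automated upgrade only starts inside the window.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-upgrade-test
spec:
  postgresql:
    version: "17"
  maintenanceWindows:
    - "01:00-06:00"
```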
### Upgrade annotations
When an upgrade is executed, the operator sets an annotation in the PostgreSQL
resource, either `last-major-upgrade-success` if the upgrade succeeds, or
`last-major-upgrade-failure` if it fails. The value of the annotation is a
timestamp indicating when the upgrade occurred.
If a PostgreSQL resource contains a failure annotation, the operator will not
attempt to retry the upgrade during a sync event. To remove the failure
annotation, you can revert the PostgreSQL version back to the current version.
This action will trigger the removal of the failure annotation.
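
For example, this is how the annotations described above would appear on the resource once set by the operator; the timestamp values are illustrative:

```yaml
# Illustrative metadata excerpt after a successful upgrade.
metadata:
  annotations:
    last-major-upgrade-success: "2024-12-23T12:00:00Z"
    # a failed attempt would set last-major-upgrade-failure instead and
    # block retries on sync until the version is reverted:
    # last-major-upgrade-failure: "2024-12-23T12:30:00Z"
```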
## Non-default cluster domain
@ -1273,7 +1297,7 @@ aws_or_gcp:
If cluster members have to be (re)initialized restoring physical backups
happens automatically either from the backup location or by running
[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html)
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html)
on one of the other running instances (preferably replicas if they do not lag
behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
clusters.
@ -1381,6 +1405,10 @@ configuration:
volumeMounts:
- mountPath: /custom-pgdata-mountpoint
name: pgdata
env:
- name: "ENV_VAR_NAME"
value: "any-k8s-env-things"
command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
- ...
```

View File

@ -186,7 +186,7 @@ go get -u github.com/derekparker/delve/cmd/dlv
```
RUN apk --no-cache add go git musl-dev
RUN go get -d github.com/derekparker/delve/cmd/dlv
RUN go get github.com/derekparker/delve/cmd/dlv
```
* Update the `Makefile` to build the project with debugging symbols. For that

View File

@ -638,7 +638,7 @@ the global configuration before adding the `tls` section'.
## Change data capture streams
This section enables change data capture (CDC) streams via Postgres'
[logical decoding](https://www.postgresql.org/docs/16/logicaldecoding.html)
[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html)
feature and `pgoutput` plugin. While the Postgres operator takes responsibility
for providing the setup to publish change events, it relies on external tools
to consume them. At Zalando, we are using a workflow based on
@ -652,11 +652,11 @@ can have the following properties:
* **applicationId**
The application name to which the database and CDC belongs to. For each
set of streams with a distinct `applicationId` a separate stream CR as well
as a separate logical replication slot will be created. This means there can
be different streams in the same database and streams with the same
`applicationId` are bundled in one stream CR. The stream CR will be called
like the Postgres cluster plus "-<applicationId>" suffix. Required.
set of streams with a distinct `applicationId` a separate stream resource as
well as a separate logical replication slot will be created. This means there
can be different streams in the same database and streams with the same
`applicationId` are bundled in one stream resource. The stream resource will
be called like the Postgres cluster plus "-<applicationId>" suffix. Required.
* **database**
Name of the database from where events will be published via Postgres'
@ -667,21 +667,37 @@ can have the following properties:
* **tables**
Defines a map of table names and their properties (`eventType`, `idColumn`
and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
and `payloadColumn`). Required.
The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
The application is responsible for putting events into a (JSON/B or VARCHAR)
payload column of the outbox table in the structure of the specified target
event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/16/logical-replication-publication.html)
event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html)
in Postgres for all tables specified for one `database` and `applicationId`.
The CDC operator will consume from it shortly after transactions are
committed to the outbox table. The `idColumn` will be used in telemetry for
the CDC operator. The names for `idColumn` and `payloadColumn` can be
configured. Defaults are `id` and `payload`. The target `eventType` has to
be defined. Required.
be defined. One can also specify a `recoveryEventType` that will be used
for a dead letter queue. By enabling `ignoreRecovery`, you can choose to
ignore failing events.
* **filter**
Streamed events can be filtered by a jsonpath expression for each table.
Optional.
* **enableRecovery**
Flag to enable a dead letter queue recovery for all streams tables.
Alternatively, recovery can also be enabled for single outbox tables by only
specifying a `recoveryEventType` and no `enableRecovery` flag. When set to
false or missing, events will be retried until consuming succeeds. You can
use a `filter` expression to get rid of poison pills. Optional.
* **batchSize**
Defines the size of batches in which events are consumed. Optional.
Defaults to 1.
* **cpu**
CPU requests to be set as an annotation on the stream resource. Optional.
* **memory**
Memory requests to be set as an annotation on the stream resource. Optional.
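
Putting these properties together, a sketch of a `streams` section in a cluster manifest; the table name and values are illustrative, while the field set mirrors the e2e test further down in this commit:

```yaml
# Illustrative streams definition (one stream resource per applicationId).
spec:
  streams:
    - applicationId: test-app
      database: foo
      batchSize: 100
      cpu: "100m"
      memory: "200Mi"
      enableRecovery: true
      tables:
        data.outbox_table:
          eventType: test-event
          idColumn: id
          payloadColumn: payload
          recoveryEventType: test-event-dlq
```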

View File

@ -94,9 +94,6 @@ Those are top-level keys, containing both leaf keys and groups.
* **enable_pgversion_env_var**
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
* **enable_spilo_wal_path_compat**
enables backwards compatible path between Spilo 12 and Spilo 13+ images. The default is `false`.
* **enable_team_id_clustername_prefix**
To lower the risk of name clashes between clusters of different teams you
can turn on this flag and the operator will sync only clusters where the
@ -250,12 +247,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.
* **minimal_major_version**
The minimal Postgres major version that will not automatically be upgraded
when `major_version_upgrade_mode` is set to `"full"`. The default is `"12"`.
when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`.
* **target_major_version**
The target Postgres major version when upgrading clusters automatically
which violate the configured allowed `minimal_major_version` when
`major_version_upgrade_mode` is set to `"full"`. The default is `"16"`.
`major_version_upgrade_mode` is set to `"full"`. The default is `"17"`.
## Kubernetes resources
@ -366,7 +363,7 @@ configuration they are grouped under the `kubernetes` key.
manifest. To keep secrets, set this option to `false`. The default is `true`.
* **enable_persistent_volume_claim_deletion**
By default, the operator deletes PersistentVolumeClaims when removing the
By default, the operator deletes persistent volume claims when removing the
Postgres cluster manifest, no matter if `persistent_volume_claim_retention_policy`
on the statefulset is set to `retain`. To keep PVCs set this option to `false`.
The default is `true`.

View File

@ -30,7 +30,7 @@ spec:
databases:
foo: zalando
postgresql:
version: "16"
version: "17"
```
Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@ -109,7 +109,7 @@ metadata:
spec:
[...]
postgresql:
version: "16"
version: "17"
parameters:
password_encryption: scram-sha-256
```
@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:
The `<dbname>_owner` role is the database owner and should be used when creating
new database objects. All members of the `admin` role, e.g. teams API roles, can
become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/16/sql-alterdefaultprivileges.html)
become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html)
are configured for the owner role so that the `<dbname>_reader` role
automatically gets read-access (SELECT) to new tables and sequences and the
`<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
@ -594,7 +594,7 @@ spec:
### Schema `search_path` for default roles
The schema [`search_path`](https://www.postgresql.org/docs/16/ddl-schemas.html#DDL-SCHEMAS-PATH)
The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH)
for each role will include the role name and the schemas this role should have
access to. So `foo_bar_writer` does not have to schema-qualify tables from
schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and
@ -695,7 +695,7 @@ handle it.
### HugePages support
The operator supports [HugePages](https://www.postgresql.org/docs/16/kernel-resources.html#LINUX-HUGEPAGES).
The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES).
To enable HugePages, set the matching resource requests and/or limits in the manifest:
```yaml
@ -838,7 +838,7 @@ spec:
### Clone directly
Another way to get a fresh copy of your source DB cluster is via
[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html). To
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To
use this feature simply leave out the timestamp field from the clone section.
The operator will connect to the service of the source cluster by name. If the
cluster is called test, then the connection string will look like host=test
@ -1005,6 +1005,7 @@ spec:
env:
- name: "ENV_VAR_NAME"
value: "any-k8s-env-things"
command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
```
In addition to any environment variables you specify, the following environment

View File

@ -46,7 +46,7 @@ tools:
# install pinned version of 'kind'
# go install must run outside of a dir with a (module-based) Go project !
# otherwise go install updates project's dependencies and/or behaves differently
cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.23.0
cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0
e2etest: tools copy clean
./run.sh main

View File

@ -8,7 +8,7 @@ IFS=$'\n\t'
readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"
export GOPATH=${GOPATH-~/go}

View File

@ -12,10 +12,9 @@ from kubernetes import client
from tests.k8s_api import K8s
from kubernetes.client.rest import ApiException
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4"
SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2"
def to_selector(labels):
return ",".join(["=".join(lbl) for lbl in labels.items()])
@ -1185,27 +1184,33 @@ class EndToEndTestCase(unittest.TestCase):
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_major_version_upgrade(self):
"""
Test major version upgrade
Test major version upgrade: with full upgrade, maintenance window, and annotation
"""
def check_version():
p = k8s.patroni_rest("acid-upgrade-test-0", "")
version = p.get("server_version", 0) // 10000
return version
def get_annotations():
pg_manifest = k8s.api.custom_objects_api.get_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test")
annotations = pg_manifest["metadata"]["annotations"]
return annotations
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-upgrade-test'
with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f:
with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'r+') as f:
upgrade_manifest = yaml.safe_load(f)
upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE
with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f:
with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'w') as f:
yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper)
k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml")
self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(check_version, 12, "Version is not correct")
self.eventuallyEqual(check_version, 13, "Version is not correct")
master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label)
# should upgrade immediately
@ -1220,11 +1225,14 @@ class EndToEndTestCase(unittest.TestCase):
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# should have finished failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 14, "Version should be upgraded from 12 to 14")
self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14")
# check if annotation for last upgrade's success is set
annotations = get_annotations()
self.assertIsNotNone(annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set")
# should not upgrade because current time is not in maintenanceWindow
current_time = datetime.now()
@ -1243,12 +1251,14 @@ class EndToEndTestCase(unittest.TestCase):
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# should have finished failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 14, "Version should not be upgraded")
second_annotations = get_annotations()
self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set")
# change the version again to trigger operator sync
maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}"
pg_patch_version_16 = {
@ -1266,12 +1276,50 @@ class EndToEndTestCase(unittest.TestCase):
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# should have finished failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16")
# check if annotation for last upgrade's success is updated after second upgrade
third_annotations = get_annotations()
self.assertIsNotNone(third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set")
self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated")
# test upgrade with failed upgrade annotation
pg_patch_version_17 = {
"metadata": {
"annotations": {
"last-major-upgrade-failure": "2024-01-02T15:04:05Z"
},
},
"spec": {
"postgresql": {
"version": "17"
},
},
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set")
# change the version back to 15 and should remove failure annotation
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
fourth_annotations = get_annotations()
self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_persistent_volume_claim_retention_policy(self):
'''
@ -2155,6 +2203,8 @@ class EndToEndTestCase(unittest.TestCase):
{
"applicationId": "test-app",
"batchSize": 100,
"cpu": "100m",
"memory": "200Mi",
"database": "foo",
"enableRecovery": True,
"tables": {
@ -2176,7 +2226,7 @@ class EndToEndTestCase(unittest.TestCase):
"eventType": "test-event",
"idColumn": "id",
"payloadColumn": "payload",
"recoveryEventType": "test-event-dlq"
"ignoreRecovery": True
}
}
}

36
go.mod
View File

@ -1,6 +1,6 @@
module github.com/zalando/postgres-operator
go 1.22
go 1.23.4
require (
github.com/aws/aws-sdk-go v1.53.8
@ -11,21 +11,22 @@ require (
github.com/r3labs/diff v1.1.0
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.9.0
golang.org/x/crypto v0.26.0
golang.org/x/crypto v0.31.0
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.12
k8s.io/api v0.30.4
k8s.io/apiextensions-apiserver v0.25.9
k8s.io/apimachinery v0.28.12
k8s.io/client-go v0.28.12
k8s.io/apimachinery v0.30.4
k8s.io/client-go v0.30.4
k8s.io/code-generator v0.25.9
)
require (
github.com/Masterminds/semver v1.5.0
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
@ -36,6 +37,7 @@ require (
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@ -46,15 +48,16 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.23.0 // indirect
golang.org/x/term v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/appengine v1.6.7 // indirect
@ -62,10 +65,11 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

78
go.sum
View File

@ -1,3 +1,5 @@
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k=
@ -6,14 +8,13 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@ -34,6 +35,7 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -45,6 +47,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -80,10 +84,12 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -113,8 +119,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -130,14 +136,14 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -145,16 +151,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -186,29 +192,31 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@ -1,20 +1,20 @@
module github.com/zalando/postgres-operator/kubectl-pg
go 1.22
go 1.23.4
require (
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.19.0
github.com/zalando/postgres-operator v1.12.2
k8s.io/api v0.28.12
github.com/zalando/postgres-operator v1.13.0
k8s.io/api v0.30.4
k8s.io/apiextensions-apiserver v0.25.9
k8s.io/apimachinery v0.28.12
k8s.io/client-go v0.28.12
k8s.io/apimachinery v0.30.4
k8s.io/client-go v0.30.4
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
@ -23,9 +23,9 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@ -40,6 +40,7 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@ -50,13 +51,13 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/protobuf v1.33.0 // indirect
@ -64,10 +65,10 @@ require (
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

View File

@ -6,13 +6,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@ -32,8 +31,9 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -42,6 +42,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@ -78,10 +80,12 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -125,16 +129,16 @@ github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSW
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zalando/postgres-operator v1.12.2 h1:HJLrGSJLKYkvdpHIxlAKhXWTeRsgDQki2s9QOyApUX0=
github.com/zalando/postgres-operator v1.12.2/go.mod h1:tKNY4pMjnr5BhuzGiGngf1SPJ7K1vVRCmMkfmV9KZoQ=
github.com/zalando/postgres-operator v1.13.0 h1:T9Mb+ZRQyTxXbagIK66GLVGCwM3661aX2lOkNpax4s8=
github.com/zalando/postgres-operator v1.13.0/go.mod h1:WiMEKzUny2lJHYle+7+D/5BhlvPn8prl76rEDYLsQAg=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -146,8 +150,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -162,18 +166,18 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -181,8 +185,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -206,23 +210,23 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@ -25,11 +25,11 @@ RUN apt-get update \
&& curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
postgresql-client-17 \
postgresql-client-16 \
postgresql-client-15 \
postgresql-client-14 \
postgresql-client-13 \
postgresql-client-12 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

View File

@ -10,7 +10,7 @@ metadata:
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec:
dockerImage: ghcr.io/zalando/spilo-16:3.3-p1
dockerImage: ghcr.io/zalando/spilo-17:4.0-p2
teamId: "acid"
numberOfInstances: 2
users: # Application/Robot users
@ -48,7 +48,7 @@ spec:
defaultRoles: true
defaultUsers: false
postgresql:
version: "16"
version: "17"
parameters: # Expert section
shared_buffers: "32MB"
max_connections: "10"

View File

@ -34,7 +34,7 @@ data:
default_memory_request: 100Mi
# delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername
docker_image: ghcr.io/zalando/spilo-16:3.3-p1
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
# downscaler_annotations: "deployment-time,downscaler/*"
enable_admin_role_for_users: "true"
enable_crd_registration: "true"
@ -86,7 +86,7 @@ data:
# logical_backup_cpu_limit: ""
# logical_backup_cpu_request: ""
logical_backup_cronjob_environment_secret: ""
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
# logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-"
# logical_backup_memory_limit: ""
@ -112,7 +112,7 @@ data:
min_cpu_limit: 250m
min_instances: "-1"
min_memory_limit: 250Mi
minimal_major_version: "12"
minimal_major_version: "13"
# node_readiness_label: "status:ready"
# node_readiness_label_merge: "OR"
oauth_token_secret_name: postgresql-operator
@ -162,7 +162,7 @@ data:
spilo_privileged: "false"
storage_resize_mode: "pvc"
super_username: postgres
target_major_version: "16"
target_major_version: "17"
team_admin_role: "admin"
team_api_role_configuration: "log_statement:all"
teams_api_url: http://fake-teams-api.default.svc.cluster.local

View File

@ -31,11 +31,21 @@ spec:
version: "13"
sidecars:
- name: "exporter"
image: "wrouesnel/postgres_exporter"
image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0"
ports:
- name: exporter
containerPort: 9187
protocol: TCP
env:
- name: DATA_SOURCE_URI
value: ":5432/?sslmode=disable"
- name: DATA_SOURCE_USER
value: "postgres"
- name: DATA_SOURCE_PASS
valueFrom:
secretKeyRef:
name: postgres.test-pg.credentials.postgresql.acid.zalan.do
key: password
resources:
limits:
cpu: 500m

View File

@ -17,4 +17,4 @@ spec:
preparedDatabases:
bar: {}
postgresql:
version: "12"
version: "13"

View File

@ -17,4 +17,4 @@ spec:
preparedDatabases:
bar: {}
postgresql:
version: "16"
version: "17"

View File

@ -66,7 +66,7 @@ spec:
type: string
docker_image:
type: string
default: "ghcr.io/zalando/spilo-16:3.3-p1"
default: "ghcr.io/zalando/spilo-17:4.0-p2"
enable_crd_registration:
type: boolean
default: true
@ -165,10 +165,10 @@ spec:
type: string
minimal_major_version:
type: string
default: "12"
default: "13"
target_major_version:
type: string
default: "16"
default: "17"
kubernetes:
type: object
properties:
@ -374,28 +374,28 @@ spec:
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
max_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
max_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
timeouts:
type: object
properties:
@ -508,7 +508,7 @@ spec:
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
logical_backup_docker_image:
type: string
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
logical_backup_google_application_credentials:
type: string
logical_backup_job_prefix:

View File

@ -19,7 +19,7 @@ spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: ghcr.io/zalando/postgres-operator:v1.13.0
image: ghcr.io/zalando/postgres-operator:v1.14.0
imagePullPolicy: IfNotPresent
resources:
requests:

View File

@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata:
name: postgresql-operator-default-configuration
configuration:
docker_image: ghcr.io/zalando/spilo-16:3.3-p1
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
# enable_crd_registration: true
# crd_categories:
# - all
@ -39,8 +39,8 @@ configuration:
major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list:
# - acid
minimal_major_version: "12"
target_major_version: "16"
minimal_major_version: "13"
target_major_version: "17"
kubernetes:
# additional_pod_capabilities:
# - "SYS_NICE"
@ -168,7 +168,7 @@ configuration:
# logical_backup_cpu_request: ""
# logical_backup_memory_limit: ""
# logical_backup_memory_request: ""
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
# logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-"
logical_backup_provider: "s3"

View File

@ -373,11 +373,11 @@ spec:
version:
type: string
enum:
- "12"
- "13"
- "14"
- "15"
- "16"
- "17"
parameters:
type: object
additionalProperties:
@ -512,6 +512,9 @@ spec:
type: string
batchSize:
type: integer
cpu:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
database:
type: string
enableRecovery:
@ -520,6 +523,9 @@ spec:
type: object
additionalProperties:
type: string
memory:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
tables:
type: object
additionalProperties:
@ -531,6 +537,8 @@ spec:
type: string
idColumn:
type: string
ignoreRecovery:
type: boolean
payloadColumn:
type: string
recoveryEventType:

View File

@ -8,7 +8,7 @@ spec:
size: 1Gi
numberOfInstances: 1
postgresql:
version: "16"
version: "17"
# Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
standby:
# s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"

View File

@ -595,9 +595,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"version": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"12"`),
},
{
Raw: []byte(`"13"`),
},
@ -610,6 +607,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
{
Raw: []byte(`"16"`),
},
{
Raw: []byte(`"17"`),
},
},
},
"parameters": {
@ -1164,7 +1164,8 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "boolean",
},
"enable_spilo_wal_path_compat": {
Type: "boolean",
Type: "boolean",
Description: "deprecated",
},
"enable_team_id_clustername_prefix": {
Type: "boolean",
@ -1573,35 +1574,35 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{
"default_cpu_limit": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$",
},
"default_cpu_request": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$",
},
"default_memory_limit": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$",
},
"default_memory_request": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$",
},
"max_cpu_request": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$",
},
"max_memory_request": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$",
},
"min_cpu_limit": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$",
},
"min_memory_limit": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$",
},
},
},

View File

@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
type MajorVersionUpgradeConfiguration struct {
MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
MinimalMajorVersion string `json:"minimal_major_version" default:"12"`
TargetMajorVersion string `json:"target_major_version" default:"16"`
MinimalMajorVersion string `json:"minimal_major_version" default:"13"`
TargetMajorVersion string `json:"target_major_version" default:"17"`
}
// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself

View File

@ -220,6 +220,7 @@ type Sidecar struct {
DockerImage string `json:"image,omitempty"`
Ports []v1.ContainerPort `json:"ports,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`
Command []string `json:"command,omitempty"`
}
// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
@ -258,6 +259,8 @@ type Stream struct {
Tables map[string]StreamTable `json:"tables"`
Filter map[string]*string `json:"filter,omitempty"`
BatchSize *uint32 `json:"batchSize,omitempty"`
CPU *string `json:"cpu,omitempty"`
Memory *string `json:"memory,omitempty"`
EnableRecovery *bool `json:"enableRecovery,omitempty"`
}
@ -265,6 +268,7 @@ type Stream struct {
type StreamTable struct {
EventType string `json:"eventType"`
RecoveryEventType string `json:"recoveryEventType,omitempty"`
IgnoreRecovery *bool `json:"ignoreRecovery,omitempty"`
IdColumn *string `json:"idColumn,omitempty"`
PayloadColumn *string `json:"payloadColumn,omitempty"`
}

View File

@ -219,7 +219,7 @@ var unmarshalCluster = []struct {
"127.0.0.1/32"
],
"postgresql": {
"version": "16",
"version": "17",
"parameters": {
"shared_buffers": "32MB",
"max_connections": "10",
@ -279,7 +279,7 @@ var unmarshalCluster = []struct {
},
Spec: PostgresSpec{
PostgresqlParam: PostgresqlParam{
PgVersion: "16",
PgVersion: "17",
Parameters: map[string]string{
"shared_buffers": "32MB",
"max_connections": "10",
@ -339,7 +339,7 @@ var unmarshalCluster = []struct {
},
Error: "",
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"16","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
{
about: "example with clone",
@ -404,7 +404,7 @@ var postgresqlList = []struct {
out PostgresqlList
err error
}{
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"16"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
PostgresqlList{
TypeMeta: metav1.TypeMeta{
Kind: "List",
@ -425,7 +425,7 @@ var postgresqlList = []struct {
},
Spec: PostgresSpec{
ClusterName: "testcluster42",
PostgresqlParam: PostgresqlParam{PgVersion: "16"},
PostgresqlParam: PostgresqlParam{PgVersion: "17"},
Volume: Volume{Size: "10Gi"},
TeamID: "acid",
AllowedSourceRanges: []string{"185.85.220.0/22"},

View File

@ -1277,6 +1277,11 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -1336,6 +1341,16 @@ func (in *Stream) DeepCopyInto(out *Stream) {
*out = new(uint32)
**out = **in
}
if in.CPU != nil {
in, out := &in.CPU, &out.CPU
*out = new(string)
**out = **in
}
if in.Memory != nil {
in, out := &in.Memory, &out.Memory
*out = new(string)
**out = **in
}
if in.EnableRecovery != nil {
in, out := &in.EnableRecovery, &out.EnableRecovery
*out = new(bool)
@ -1357,6 +1372,11 @@ func (in *Stream) DeepCopy() *Stream {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StreamTable) DeepCopyInto(out *StreamTable) {
*out = *in
if in.IgnoreRecovery != nil {
in, out := &in.IgnoreRecovery, &out.IgnoreRecovery
*out = new(bool)
**out = **in
}
if in.IdColumn != nil {
in, out := &in.IdColumn, &out.IdColumn
*out = new(string)

View File

@ -65,11 +65,11 @@ type kubeResources struct {
PatroniConfigMaps map[string]*v1.ConfigMap
Secrets map[types.UID]*v1.Secret
Statefulset *appsv1.StatefulSet
VolumeClaims map[types.UID]*v1.PersistentVolumeClaim
PodDisruptionBudget *policyv1.PodDisruptionBudget
LogicalBackupJob *batchv1.CronJob
Streams map[string]*zalandov1.FabricEventStream
//Pods are treated separately
//PVCs are treated separately
}
// Cluster describes postgresql cluster
@ -140,6 +140,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
Endpoints: make(map[PostgresRole]*v1.Endpoints),
PatroniEndpoints: make(map[string]*v1.Endpoints),
PatroniConfigMaps: make(map[string]*v1.ConfigMap),
VolumeClaims: make(map[types.UID]*v1.PersistentVolumeClaim),
Streams: make(map[string]*zalandov1.FabricEventStream)},
userSyncStrategy: users.DefaultUserSyncStrategy{
PasswordEncryption: passwordEncryption,
@ -363,6 +364,11 @@ func (c *Cluster) Create() (err error) {
c.logger.Infof("pods are ready")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready")
// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
if err = c.syncVolumes(); err != nil {
return err
}
// sync resources created by Patroni
if err = c.syncPatroniResources(); err != nil {
c.logger.Warnf("Patroni resources not yet synced: %v", err)
@ -1014,7 +1020,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser
if initUsers {
c.logger.Debugf("initialize users")
c.logger.Debug("initialize users")
if err := c.initUsers(); err != nil {
c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)
userInitFailed = true
@ -1023,7 +1029,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}
if initUsers || annotationsChanged {
c.logger.Debugf("syncing secrets")
c.logger.Debug("syncing secrets")
//TODO: mind the secrets of the deleted/new users
if err := c.syncSecrets(); err != nil {
c.logger.Errorf("could not sync secrets: %v", err)
@ -1065,7 +1071,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// create if it did not exist
if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup {
c.logger.Debugf("creating backup cron job")
c.logger.Debug("creating backup cron job")
if err := c.createLogicalBackupJob(); err != nil {
c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err)
updateFailed = true
@ -1075,7 +1081,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// delete if no longer needed
if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup {
c.logger.Debugf("deleting backup cron job")
c.logger.Debug("deleting backup cron job")
if err := c.deleteLogicalBackupJob(); err != nil {
c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err)
updateFailed = true
@ -1095,7 +1101,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Roles and Databases
if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
c.logger.Debugf("syncing roles")
c.logger.Debug("syncing roles")
if err := c.syncRoles(); err != nil {
c.logger.Errorf("could not sync roles: %v", err)
updateFailed = true
@ -1390,18 +1396,18 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
}
var searchPath strings.Builder
searchPath.WriteString(constants.DefaultSearchPath)
searchPathArr := []string{constants.DefaultSearchPath}
for preparedSchemaName := range preparedSchemas {
searchPath.WriteString(", " + preparedSchemaName)
searchPathArr = append(searchPathArr, fmt.Sprintf("%q", preparedSchemaName))
}
searchPath := strings.Join(searchPathArr, ", ")
// default roles per database
if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
}
if preparedDB.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
}
}
@ -1412,14 +1418,16 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
if err := c.initDefaultRoles(defaultRoles,
preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName),
preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
}
if preparedSchema.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers,
preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName),
preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
}
}

View File

@ -71,11 +71,11 @@ var cl = New(
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: util.True(),
Streams: []acidv1.Stream{
acidv1.Stream{
{
ApplicationId: "test-app",
Database: "test_db",
Tables: map[string]acidv1.StreamTable{
"test_table": acidv1.StreamTable{
"test_table": {
EventType: "test-app.test",
},
},
@ -95,6 +95,7 @@ func TestCreate(t *testing.T) {
client := k8sutil.KubernetesClient{
DeploymentsGetter: clientSet.AppsV1(),
CronJobsGetter: clientSet.BatchV1(),
EndpointsGetter: clientSet.CoreV1(),
PersistentVolumeClaimsGetter: clientSet.CoreV1(),
PodDisruptionBudgetsGetter: clientSet.PolicyV1(),
@ -111,6 +112,7 @@ func TestCreate(t *testing.T) {
Namespace: clusterNamespace,
},
Spec: acidv1.PostgresSpec{
EnableLogicalBackup: true,
Volume: acidv1.Volume{
Size: "1Gi",
},
@ -1504,7 +1506,7 @@ func newCronJob(image, schedule string, vars []v1.EnvVar, mounts []v1.VolumeMoun
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
{
Name: "logical-backup",
Image: image,
Env: vars,

View File

@ -591,7 +591,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
// Lack of connection pooler objects is not a fatal error, just log it if
// it was present before in the manifest
if c.ConnectionPooler[role] == nil || role == "" {
c.logger.Debugf("no connection pooler to delete")
c.logger.Debug("no connection pooler to delete")
return nil
}
@ -622,7 +622,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
// Repeat the same for the service object
service := c.ConnectionPooler[role].Service
if service == nil {
c.logger.Debugf("no connection pooler service object to delete")
c.logger.Debug("no connection pooler service object to delete")
} else {
err = c.KubeClient.

View File

@ -969,7 +969,7 @@ func TestPoolerTLS(t *testing.T) {
TLS: &acidv1.TLSDescription{
SecretName: tlsSecretName, CAFile: "ca.crt"},
AdditionalVolumes: []acidv1.AdditionalVolume{
acidv1.AdditionalVolume{
{
Name: tlsSecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{

View File

@ -111,7 +111,7 @@ func (c *Cluster) pgConnectionString(dbname string) string {
func (c *Cluster) databaseAccessDisabled() bool {
if !c.OpConfig.EnableDBAccess {
c.logger.Debugf("database access is disabled")
c.logger.Debug("database access is disabled")
}
return !c.OpConfig.EnableDBAccess

View File

@ -739,7 +739,7 @@ func (c *Cluster) generateSidecarContainers(sidecars []acidv1.Sidecar,
}
// adds common fields to sidecars
func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container {
func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string) []v1.Container {
result := []v1.Container{}
for _, container := range in {
@ -1222,6 +1222,7 @@ func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.Resour
Resources: *resources,
Env: sidecar.Env,
Ports: sidecar.Ports,
Command: sidecar.Command,
}
}
@ -1444,7 +1445,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
containerName, containerName)
}
sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger)
sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername))
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
@ -1598,7 +1599,7 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s
for k, v := range c.OpConfig.CustomPodAnnotations {
annotations[k] = v
}
if spec != nil || spec.PodAnnotations != nil {
if spec.PodAnnotations != nil {
for k, v := range spec.PodAnnotations {
annotations[k] = v
}
@ -1859,7 +1860,7 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: quantity,
},
@ -1875,18 +1876,16 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag
func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
secrets := make(map[string]*v1.Secret, len(c.pgUsers)+len(c.systemUsers))
namespace := c.Namespace
for username, pgUser := range c.pgUsers {
//Skip users with no password i.e. human users (they'll be authenticated using pam)
secret := c.generateSingleUserSecret(pgUser.Namespace, pgUser)
secret := c.generateSingleUserSecret(pgUser)
if secret != nil {
secrets[username] = secret
}
namespace = pgUser.Namespace
}
/* special case for the system user */
for _, systemUser := range c.systemUsers {
secret := c.generateSingleUserSecret(namespace, systemUser)
secret := c.generateSingleUserSecret(systemUser)
if secret != nil {
secrets[systemUser.Name] = secret
}
@ -1895,7 +1894,7 @@ func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
return secrets
}
func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret {
func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret {
//Skip users with no password i.e. human users (they'll be authenticated using pam)
if pgUser.Password == "" {
if pgUser.Origin != spec.RoleOriginTeamsAPI {

View File

@ -72,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
}{
{
subtest: "Patroni default configuration",
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
patroni: &acidv1.Patroni{},
opConfig: &config.Config{
Auth: config.Auth{
PamRoleName: "zalandos",
},
},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
},
{
subtest: "Patroni configured",
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
patroni: &acidv1.Patroni{
InitDB: map[string]string{
"encoding": "UTF8",
@ -102,38 +102,38 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
FailsafeMode: util.True(),
},
opConfig: &config.Config{},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
},
{
subtest: "Patroni failsafe_mode configured globally",
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
patroni: &acidv1.Patroni{},
opConfig: &config.Config{
EnablePatroniFailsafeMode: util.True(),
},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
},
{
subtest: "Patroni failsafe_mode configured globally, disabled for cluster",
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
patroni: &acidv1.Patroni{
FailsafeMode: util.False(),
},
opConfig: &config.Config{
EnablePatroniFailsafeMode: util.True(),
},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
},
{
subtest: "Patroni failsafe_mode disabled globally, configured for cluster",
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
patroni: &acidv1.Patroni{
FailsafeMode: util.True(),
},
opConfig: &config.Config{
EnablePatroniFailsafeMode: util.False(),
},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
},
}
for _, tt := range tests {
@ -164,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
},
{
subTest: "test current bin path against hard coded template",
binPath: "/usr/lib/postgresql/16/bin",
binPath: "/usr/lib/postgresql/17/bin",
template: pgBinariesLocationTemplate,
expected: "16",
expected: "17",
},
{
subTest: "test alternative bin path against a matching template",
binPath: "/usr/pgsql-16/bin",
binPath: "/usr/pgsql-17/bin",
template: "/usr/pgsql-%v/bin",
expected: "16",
expected: "17",
},
}
@ -1451,9 +1451,9 @@ func TestNodeAffinity(t *testing.T) {
nodeAff := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
v1.NodeSelectorRequirement{
{
Key: "test-label",
Operator: v1.NodeSelectorOpIn,
Values: []string{
@ -1673,7 +1673,7 @@ func TestTLS(t *testing.T) {
TLS: &acidv1.TLSDescription{
SecretName: tlsSecretName, CAFile: "ca.crt"},
AdditionalVolumes: []acidv1.AdditionalVolume{
acidv1.AdditionalVolume{
{
Name: tlsSecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
@ -2148,7 +2148,7 @@ func TestSidecars(t *testing.T) {
spec = acidv1.PostgresSpec{
PostgresqlParam: acidv1.PostgresqlParam{
PgVersion: "16",
PgVersion: "17",
Parameters: map[string]string{
"max_connections": "100",
},
@ -2162,17 +2162,17 @@ func TestSidecars(t *testing.T) {
Size: "1G",
},
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
{
Name: "cluster-specific-sidecar",
},
acidv1.Sidecar{
{
Name: "cluster-specific-sidecar-with-resources",
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
},
},
acidv1.Sidecar{
{
Name: "replace-sidecar",
DockerImage: "override-image",
},
@ -2200,11 +2200,11 @@ func TestSidecars(t *testing.T) {
"deprecated-global-sidecar": "image:123",
},
SidecarContainers: []v1.Container{
v1.Container{
{
Name: "global-sidecar",
},
// will be replaced by a cluster specific sidecar with the same name
v1.Container{
{
Name: "replace-sidecar",
Image: "replaced-image",
},
@ -2259,7 +2259,7 @@ func TestSidecars(t *testing.T) {
},
}
mounts := []v1.VolumeMount{
v1.VolumeMount{
{
Name: "pgdata",
MountPath: "/home/postgres/pgdata",
},
@ -2516,17 +2516,17 @@ func TestGenerateService(t *testing.T) {
Size: "1G",
},
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
{
Name: "cluster-specific-sidecar",
},
acidv1.Sidecar{
{
Name: "cluster-specific-sidecar-with-resources",
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
},
},
acidv1.Sidecar{
{
Name: "replace-sidecar",
DockerImage: "override-image",
},
@ -2555,11 +2555,11 @@ func TestGenerateService(t *testing.T) {
"deprecated-global-sidecar": "image:123",
},
SidecarContainers: []v1.Container{
v1.Container{
{
Name: "global-sidecar",
},
// will be replaced by a cluster specific sidecar with the same name
v1.Container{
{
Name: "replace-sidecar",
Image: "replaced-image",
},
@ -2654,27 +2654,27 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string) []v1.ServiceSpec {
return []v1.ServiceSpec{
v1.ServiceSpec{
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Type: serviceType,
},
v1.ServiceSpec{
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Selector: map[string]string{"connection-pooler": clusterName + "-pooler"},
Type: serviceType,
},
v1.ServiceSpec{
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Selector: map[string]string{"spilo-role": "replica", "application": "spilo", "cluster-name": clusterName},
Type: serviceType,
},
v1.ServiceSpec{
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
@ -2894,7 +2894,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
{
Name: sidecarName,
},
},
@ -2993,6 +2993,44 @@ func TestGenerateResourceRequirements(t *testing.T) {
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
},
},
{
subTest: "test generation of resources when min limits are all set to zero",
config: config.Config{
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: clusterNameLabel,
DefaultCPURequest: "0",
DefaultCPULimit: "0",
MaxCPURequest: "0",
MinCPULimit: "0",
DefaultMemoryRequest: "0",
DefaultMemoryLimit: "0",
MaxMemoryRequest: "0",
MinMemoryLimit: "0",
PodRoleLabel: "spilo-role",
},
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")},
},
},
{
subTest: "test matchLimitsWithRequestsIfSmaller",
config: config.Config{
@ -3095,7 +3133,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
{
Name: sidecarName,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
@ -3184,7 +3222,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
{
Name: sidecarName,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},

View File

@ -1,12 +1,17 @@
package cluster
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/Masterminds/semver"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// VersionMap Map of version numbers
@ -16,8 +21,14 @@ var VersionMap = map[string]int{
"14": 140000,
"15": 150000,
"16": 160000,
"17": 170000,
}
const (
majorVersionUpgradeSuccessAnnotation = "last-major-upgrade-success"
majorVersionUpgradeFailureAnnotation = "last-major-upgrade-failure"
)
// IsBiggerPostgresVersion Compare two Postgres version numbers
func IsBiggerPostgresVersion(old string, new string) bool {
oldN := VersionMap[old]
@ -34,7 +45,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int {
func (c *Cluster) GetDesiredMajorVersion() string {
if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
// e.g. current is 12, minimal is 12 allowing 12 to 16 clusters, everything below is upgraded
// e.g. current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded
if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
return c.Config.OpConfig.TargetMajorVersion
@ -54,6 +65,47 @@ func (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool {
return util.SliceContains(allowedTeams, owningTeam)
}
func (c *Cluster) annotatePostgresResource(isSuccess bool) error {
annotations := make(map[string]string)
currentTime := metav1.Now().Format("2006-01-02T15:04:05Z")
if isSuccess {
annotations[majorVersionUpgradeSuccessAnnotation] = currentTime
} else {
annotations[majorVersionUpgradeFailureAnnotation] = currentTime
}
patchData, err := metaAnnotationsPatch(annotations)
if err != nil {
c.logger.Errorf("could not form patch for %s postgresql resource: %v", c.Name, err)
return err
}
_, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.MergePatchType, patchData, metav1.PatchOptions{})
if err != nil {
c.logger.Errorf("failed to patch annotations to postgresql resource: %v", err)
return err
}
return nil
}
func (c *Cluster) removeFailuresAnnotation() error {
annotationToRemove := []map[string]string{
{
"op": "remove",
"path": fmt.Sprintf("/metadata/annotations/%s", majorVersionUpgradeFailureAnnotation),
},
}
removePatch, err := json.Marshal(annotationToRemove)
if err != nil {
c.logger.Errorf("could not form removal patch for %s postgresql resource: %v", c.Name, err)
return err
}
_, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.JSONPatchType, removePatch, metav1.PatchOptions{})
if err != nil {
c.logger.Errorf("failed to remove annotations from postgresql resource: %v", err)
return err
}
return nil
}
/*
Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off").
@ -69,6 +121,10 @@ func (c *Cluster) majorVersionUpgrade() error {
desiredVersion := c.GetDesiredMajorVersionAsInt()
if c.currentMajorVersion >= desiredVersion {
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it
c.removeFailuresAnnotation()
c.logger.Infof("removing failure annotation as the cluster is already up to date")
}
c.logger.Infof("cluster version up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
return nil
}
@ -90,23 +146,75 @@ func (c *Cluster) majorVersionUpgrade() error {
for i, pod := range pods {
ps, _ := c.patroni.GetMemberData(&pod)
if ps.Role == "standby_leader" {
c.logger.Errorf("skipping major version upgrade for %s/%s standby cluster. Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name)
return nil
}
if ps.State != "running" {
allRunning = false
c.logger.Infof("identified non running pod, potentially skipping major version upgrade")
}
if ps.Role == "master" {
if ps.Role == "master" || ps.Role == "primary" {
masterPod = &pods[i]
c.currentMajorVersion = ps.ServerVersion
}
}
if masterPod == nil {
c.logger.Infof("no master in the cluster, skipping major version upgrade")
return nil
}
// Recheck version with newest data from Patroni
if c.currentMajorVersion >= desiredVersion {
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it
c.removeFailuresAnnotation()
c.logger.Infof("removing failure annotation as the cluster is already up to date")
}
c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
return nil
}
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists {
c.logger.Infof("last major upgrade failed, skipping upgrade")
return nil
}
members, err := c.patroni.GetClusterMembers(masterPod)
if err != nil {
c.logger.Error("could not get cluster members data from Patroni API, skipping major version upgrade")
return err
}
patroniData, err := c.patroni.GetMemberData(masterPod)
if err != nil {
c.logger.Error("could not get members data from Patroni API, skipping major version upgrade")
return err
}
patroniVer, err := semver.NewVersion(patroniData.Patroni.Version)
if err != nil {
c.logger.Error("error parsing Patroni version")
patroniVer, _ = semver.NewVersion("3.0.4")
}
verConstraint, _ := semver.NewConstraint(">= 3.0.4")
checkStreaming, _ := verConstraint.Validate(patroniVer)
for _, member := range members {
if PostgresRole(member.Role) == Leader {
continue
}
if checkStreaming && member.State != "streaming" {
c.logger.Infof("skipping major version upgrade, replica %s is not streaming from primary", member.Name)
return nil
}
if member.Lag > 16*1024*1024 {
c.logger.Infof("skipping major version upgrade, replication lag on member %s is too high", member.Name)
return nil
}
}
isUpgradeSuccess := true
numberOfPods := len(pods)
if allRunning && masterPod != nil {
c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
@ -116,27 +224,32 @@ func (c *Cluster) majorVersionUpgrade() error {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)
c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
c.logger.Debug("checking if the spilo image runs with root or non-root (check for user id=0)")
resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
if errIdCheck != nil {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)
}
resultIdCheck = strings.TrimSuffix(resultIdCheck, "\n")
var result string
var result, scriptErrMsg string
if resultIdCheck != "0" {
c.logger.Infof("user id was identified as: %s, hence default user is non-root already", resultIdCheck)
result, err = c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand)
scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log")
} else {
c.logger.Infof("user id was identified as: %s, using su to reach the postgres user", resultIdCheck)
result, err = c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand)
scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log")
}
if err != nil {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err)
return err
isUpgradeSuccess = false
c.annotatePostgresResource(isUpgradeSuccess)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg)
return fmt.Errorf(scriptErrMsg)
}
c.logger.Infof("upgrade action triggered and command completed: %s", result[:100])
c.annotatePostgresResource(isUpgradeSuccess)
c.logger.Infof("upgrade action triggered and command completed: %s", result[:100])
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion)
}
}
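// Editor's illustrative sketch (not part of the diff): the pre-upgrade checks above
// only require replicas to be in "streaming" state when Patroni reports version
// 3.0.4 or newer, and refuse the upgrade when replication lag exceeds 16 MiB.
// Assuming the Masterminds semver package (the operator's actual semver import is
// not visible in this hunk), the gate can be reduced to:
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

const maxAllowedLag = 16 * 1024 * 1024 // 16 MiB, same threshold as in the loop above

func replicaBlocksUpgrade(patroniVersion, state string, lag uint64) bool {
	v, err := semver.NewVersion(patroniVersion)
	if err != nil {
		// fall back to 3.0.4 as the operator does when the version cannot be parsed
		v, _ = semver.NewVersion("3.0.4")
	}
	constraint, _ := semver.NewConstraint(">= 3.0.4")
	checkStreaming, _ := constraint.Validate(v)
	if checkStreaming && state != "streaming" {
		return true // replica is not streaming from the primary
	}
	return lag > maxAllowedLag
}

func main() {
	fmt.Println(replicaBlocksUpgrade("4.0.2", "streaming", 1024))        // false: healthy replica
	fmt.Println(replicaBlocksUpgrade("3.1.0", "in archive recovery", 0)) // true: must be streaming
}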

View File

@ -59,7 +59,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
return nil
}
c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg)
c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg)
flag := make(map[string]string)
flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)
@ -110,7 +110,7 @@ func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
}
func (c *Cluster) deletePods() error {
c.logger.Debugln("deleting pods")
c.logger.Debug("deleting pods")
pods, err := c.listPods()
if err != nil {
return err
@ -127,9 +127,9 @@ func (c *Cluster) deletePods() error {
}
}
if len(pods) > 0 {
c.logger.Debugln("pods have been deleted")
c.logger.Debug("pods have been deleted")
} else {
c.logger.Debugln("no pods to delete")
c.logger.Debug("no pods to delete")
}
return nil
@ -230,7 +230,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
}
if !eol {
c.logger.Debugf("no action needed: master pod is already on a live node")
c.logger.Debug("no action needed: master pod is already on a live node")
return nil
}
@ -480,6 +480,9 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
if PostgresRole(member.Role) == SyncStandby {
syncCandidates = append(syncCandidates, member)
}
if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
candidates = append(candidates, member)
}
}
// if synchronous mode is enabled and no SyncStandy was found
@ -489,6 +492,12 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
return false, nil
}
// retry also in asynchronous mode when no replica candidate was found
if !c.Spec.Patroni.SynchronousMode && len(candidates) == 0 {
c.logger.Warnf("no replica candidate found - retrying fetching cluster members")
return false, nil
}
return true, nil
},
)
@ -502,24 +511,12 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
return syncCandidates[i].Lag < syncCandidates[j].Lag
})
return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil
} else {
// in asynchronous mode find running replicas
for _, member := range members {
if PostgresRole(member.Role) == Leader || PostgresRole(member.Role) == StandbyLeader {
continue
}
if slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
candidates = append(candidates, member)
}
}
if len(candidates) > 0 {
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].Lag < candidates[j].Lag
})
return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil
}
}
if len(candidates) > 0 {
sort.Slice(candidates, func(i, j int) bool {
return candidates[i].Lag < candidates[j].Lag
})
return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil
}
return spec.NamespacedName{}, fmt.Errorf("no switchover candidate found")
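// Editor's illustrative sketch (not part of the diff): with the refactoring above,
// candidate selection is the same for sync and async mode: skip leaders, keep
// members in an eligible state, then prefer the lowest reported lag. Types and
// role strings are simplified stand-ins for the operator's PostgresRole constants.
package main

import (
	"fmt"
	"slices"
	"sort"
)

type member struct {
	Name  string
	Role  string
	State string
	Lag   uint64
}

func pickCandidate(members []member) (string, bool) {
	candidates := make([]member, 0)
	for _, m := range members {
		if m.Role == "leader" || m.Role == "standby_leader" {
			continue
		}
		if slices.Contains([]string{"running", "streaming", "in archive recovery"}, m.State) {
			candidates = append(candidates, m)
		}
	}
	if len(candidates) == 0 {
		return "", false // caller retries fetching cluster members
	}
	sort.Slice(candidates, func(i, j int) bool { return candidates[i].Lag < candidates[j].Lag })
	return candidates[0].Name, true
}

func main() {
	name, ok := pickCandidate([]member{
		{Name: "acid-test-cluster-0", Role: "leader", State: "running"},
		{Name: "acid-test-cluster-1", Role: "replica", State: "streaming", Lag: 5},
		{Name: "acid-test-cluster-2", Role: "replica", State: "running", Lag: 5},
	})
	fmt.Println(name, ok) // acid-test-cluster-1 true
}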

View File

@ -62,7 +62,7 @@ func TestGetSwitchoverCandidate(t *testing.T) {
expectedError: nil,
},
{
subtest: "choose first replica when lag is equal evrywhere",
subtest: "choose first replica when lag is equal everywhere",
clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`,
syncModeEnabled: false,
expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
@ -73,7 +73,7 @@ func TestGetSwitchoverCandidate(t *testing.T) {
clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 2}, {"name": "acid-test-cluster-1", "role": "replica", "state": "starting", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 2}]}`,
syncModeEnabled: false,
expectedCandidate: spec.NamespacedName{},
expectedError: fmt.Errorf("no switchover candidate found"),
expectedError: fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"),
},
{
subtest: "replicas with different status",

View File

@ -39,8 +39,8 @@ func (c *Cluster) listResources() error {
c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID)
}
for _, secret := range c.Secrets {
c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID, secret.ObjectMeta.Namespace)
for uid, secret := range c.Secrets {
c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), uid, secret.ObjectMeta.Namespace)
}
for role, service := range c.Services {
@ -70,13 +70,8 @@ func (c *Cluster) listResources() error {
c.logger.Infof("found pod: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
}
pvcs, err := c.listPersistentVolumeClaims()
if err != nil {
return fmt.Errorf("could not get the list of PVCs: %v", err)
}
for _, obj := range pvcs {
c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
for uid, pvc := range c.VolumeClaims {
c.logger.Infof("found persistent volume claim: %q (uid: %q)", util.NameFromMeta(pvc.ObjectMeta), uid)
}
for role, poolerObjs := range c.ConnectionPooler {
@ -187,7 +182,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
c.logger.Warningf("could not scale down: %v", err)
}
}
c.logger.Debugf("updating statefulset")
c.logger.Debug("updating statefulset")
patchData, err := specPatch(newStatefulSet.Spec)
if err != nil {
@ -218,7 +213,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
}
statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta)
c.logger.Debugf("replacing statefulset")
c.logger.Debug("replacing statefulset")
// Delete the current statefulset without deleting the pods
deletePropagationPolicy := metav1.DeletePropagationOrphan
@ -232,7 +227,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
// make sure we clear the stored statefulset status if the subsequent create fails.
c.Statefulset = nil
// wait until the statefulset is truly deleted
c.logger.Debugf("waiting for the statefulset to be deleted")
c.logger.Debug("waiting for the statefulset to be deleted")
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) {
@ -266,7 +261,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
func (c *Cluster) deleteStatefulSet() error {
c.setProcessName("deleting statefulset")
c.logger.Debugln("deleting statefulset")
c.logger.Debug("deleting statefulset")
if c.Statefulset == nil {
c.logger.Debug("there is no statefulset in the cluster")
return nil
@ -288,10 +283,10 @@ func (c *Cluster) deleteStatefulSet() error {
if c.OpConfig.EnablePersistentVolumeClaimDeletion != nil && *c.OpConfig.EnablePersistentVolumeClaimDeletion {
if err := c.deletePersistentVolumeClaims(); err != nil {
return fmt.Errorf("could not delete PersistentVolumeClaims: %v", err)
return fmt.Errorf("could not delete persistent volume claims: %v", err)
}
} else {
c.logger.Info("not deleting PersistentVolumeClaims because disabled in configuration")
c.logger.Info("not deleting persistent volume claims because disabled in configuration")
}
return nil
@ -349,7 +344,8 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
}
func (c *Cluster) deleteService(role PostgresRole) error {
c.logger.Debugf("deleting service %s", role)
c.setProcessName("deleting service")
c.logger.Debugf("deleting %s service", role)
if c.Services[role] == nil {
c.logger.Debugf("No service for %s role was found, nothing to delete", role)
@ -495,7 +491,7 @@ func (c *Cluster) deletePodDisruptionBudget() error {
func (c *Cluster) deleteEndpoint(role PostgresRole) error {
c.setProcessName("deleting endpoint")
c.logger.Debugln("deleting endpoint")
c.logger.Debugf("deleting %s endpoint", role)
if c.Endpoints[role] == nil {
c.logger.Debugf("there is no %s endpoint in the cluster", role)
return nil
@ -543,7 +539,7 @@ func (c *Cluster) deletePatroniResources() error {
func (c *Cluster) deletePatroniConfigMap(suffix string) error {
c.setProcessName("deleting Patroni config map")
c.logger.Debugln("deleting Patroni config map")
c.logger.Debugf("deleting %s Patroni config map", suffix)
cm := c.PatroniConfigMaps[suffix]
if cm == nil {
c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix)
@ -565,7 +561,7 @@ func (c *Cluster) deletePatroniConfigMap(suffix string) error {
func (c *Cluster) deletePatroniEndpoint(suffix string) error {
c.setProcessName("deleting Patroni endpoint")
c.logger.Debugln("deleting Patroni endpoint")
c.logger.Debugf("deleting %s Patroni endpoint", suffix)
ep := c.PatroniEndpoints[suffix]
if ep == nil {
c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix)

View File

@ -46,11 +46,13 @@ func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (p
func (c *Cluster) deleteStream(appId string) error {
c.setProcessName("deleting event stream")
c.logger.Debugf("deleting event stream with applicationId %s", appId)
err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err)
}
c.logger.Infof("event stream %q with applicationId %s has been successfully deleted", c.Streams[appId].Name, appId)
delete(c.Streams, appId)
return nil
@ -176,16 +178,25 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream {
eventStreams := make([]zalandov1.EventStream, 0)
resourceAnnotations := map[string]string{}
var err, err2 error
for _, stream := range c.Spec.Streams {
if stream.ApplicationId != appId {
continue
}
err = setResourceAnnotation(&resourceAnnotations, stream.CPU, constants.EventStreamCpuAnnotationKey)
err2 = setResourceAnnotation(&resourceAnnotations, stream.Memory, constants.EventStreamMemoryAnnotationKey)
if err != nil || err2 != nil {
c.logger.Warningf("could not set resource annotation for event stream: %v", err)
}
for tableName, table := range stream.Tables {
streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn)
streamFlow := getEventStreamFlow(stream, table.PayloadColumn)
streamFlow := getEventStreamFlow(table.PayloadColumn)
streamSink := getEventStreamSink(stream, table.EventType)
streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType)
streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType, table.IgnoreRecovery)
eventStreams = append(eventStreams, zalandov1.EventStream{
EventStreamFlow: streamFlow,
@ -205,7 +216,7 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))),
Namespace: c.Namespace,
Labels: c.labelsSet(true),
Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
Annotations: c.AnnotationsToPropagate(c.annotationsSet(resourceAnnotations)),
OwnerReferences: c.ownerReferences(),
},
Spec: zalandov1.FabricEventStreamSpec{
@ -215,6 +226,27 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
}
}
func setResourceAnnotation(annotations *map[string]string, resource *string, key string) error {
var (
isSmaller bool
err error
)
if resource != nil {
currentValue, exists := (*annotations)[key]
if exists {
isSmaller, err = util.IsSmallerQuantity(currentValue, *resource)
if err != nil {
return fmt.Errorf("could not compare resource in %q annotation: %v", key, err)
}
}
if isSmaller || !exists {
(*annotations)[key] = *resource
}
}
return nil
}
func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource {
table, schema := getTableSchema(tableName)
streamFilter := stream.Filter[tableName]
@ -230,7 +262,7 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, i
}
}
func getEventStreamFlow(stream acidv1.Stream, payloadColumn *string) zalandov1.EventStreamFlow {
func getEventStreamFlow(payloadColumn *string) zalandov1.EventStreamFlow {
return zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
PayloadColumn: payloadColumn,
@ -245,7 +277,7 @@ func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1.EventS
}
}
func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery {
func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string, ignoreRecovery *bool) zalandov1.EventStreamRecovery {
if (stream.EnableRecovery != nil && !*stream.EnableRecovery) ||
(stream.EnableRecovery == nil && recoveryEventType == "") {
return zalandov1.EventStreamRecovery{
@ -253,6 +285,12 @@ func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType s
}
}
if ignoreRecovery != nil && *ignoreRecovery {
return zalandov1.EventStreamRecovery{
Type: constants.EventStreamRecoveryIgnoreType,
}
}
if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" {
recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix)
}
@ -308,7 +346,7 @@ func (c *Cluster) syncStreams() error {
_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("event stream CRD not installed, skipping")
c.logger.Debug("event stream CRD not installed, skipping")
return nil
}
@ -440,7 +478,9 @@ func (c *Cluster) syncStream(appId string) error {
c.setProcessName("syncing stream with applicationId %s", appId)
c.logger.Debugf("syncing stream with applicationId %s", appId)
listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()}
listOptions := metav1.ListOptions{
LabelSelector: c.labelsSet(false).String(),
}
streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
if err != nil {
return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err)
@ -451,15 +491,6 @@ func (c *Cluster) syncStream(appId string) error {
if stream.Spec.ApplicationId != appId {
continue
}
if streamExists {
c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId)
if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err)
} else {
c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId)
}
continue
}
streamExists = true
desiredStreams := c.generateFabricEventStream(appId)
if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
@ -473,8 +504,9 @@ func (c *Cluster) syncStream(appId string) error {
c.Streams[appId] = stream
}
if match, reason := c.compareStreams(&stream, desiredStreams); !match {
c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
desiredStreams.ObjectMeta = stream.ObjectMeta
c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
// make sure to keep the old name with randomly generated suffix
desiredStreams.ObjectMeta.Name = stream.ObjectMeta.Name
updatedStream, err := c.updateStreams(desiredStreams)
if err != nil {
return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err)
@ -482,6 +514,7 @@ func (c *Cluster) syncStream(appId string) error {
c.Streams[appId] = updatedStream
c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
}
break
}
if !streamExists {
@ -499,15 +532,29 @@ func (c *Cluster) syncStream(appId string) error {
func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) {
reasons := make([]string, 0)
desiredAnnotations := make(map[string]string)
match = true
// the stream operator can add extra annotations, so include current annotations in the desired set
desiredAnnotations := c.annotationsSet(curEventStreams.Annotations)
for curKey, curValue := range curEventStreams.Annotations {
if _, exists := desiredAnnotations[curKey]; !exists {
desiredAnnotations[curKey] = curValue
}
}
// add or override annotations if cpu and memory values were changed
for newKey, newValue := range newEventStreams.Annotations {
desiredAnnotations[newKey] = newValue
}
if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed {
match = false
reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason))
}
if !reflect.DeepEqual(curEventStreams.ObjectMeta.Labels, newEventStreams.ObjectMeta.Labels) {
match = false
reasons = append(reasons, "new streams labels do not match the current ones")
}
if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed {
match = false
reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason))
@ -550,7 +597,6 @@ func (c *Cluster) cleanupRemovedStreams(appIds []string) error {
if err != nil {
errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err))
}
c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId)
}
}
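// Editor's illustrative sketch (not part of the diff): setResourceAnnotation above
// keeps the largest CPU/memory value seen across all streams of one applicationId.
// Assuming util.IsSmallerQuantity compares Kubernetes resource quantity strings,
// the core comparison can be reproduced with apimachinery's resource package:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// keepLarger returns the larger of two quantity strings, e.g. for the
// fes.zalando.org/FES_CPU and fes.zalando.org/FES_MEMORY annotations.
func keepLarger(current, proposed string) (string, error) {
	cur, err := resource.ParseQuantity(current)
	if err != nil {
		return "", err
	}
	prop, err := resource.ParseQuantity(proposed)
	if err != nil {
		return "", err
	}
	if cur.Cmp(prop) < 0 {
		return proposed, nil // current value is smaller, take the proposed one
	}
	return current, nil
}

func main() {
	v, _ := keepLarger("250m", "500m")
	fmt.Println(v) // 500m
	v, _ = keepLarger("1Gi", "500Mi")
	fmt.Println(v) // 1Gi
}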

View File

@ -56,21 +56,26 @@ var (
ApplicationId: appId,
Database: "foo",
Tables: map[string]acidv1.StreamTable{
"data.bar": acidv1.StreamTable{
"data.bar": {
EventType: "stream-type-a",
IdColumn: k8sutil.StringToPointer("b_id"),
PayloadColumn: k8sutil.StringToPointer("b_payload"),
},
"data.foobar": acidv1.StreamTable{
"data.foobar": {
EventType: "stream-type-b",
RecoveryEventType: "stream-type-b-dlq",
},
"data.foofoobar": {
EventType: "stream-type-c",
IgnoreRecovery: util.True(),
},
},
EnableRecovery: util.True(),
Filter: map[string]*string{
"data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
},
BatchSize: k8sutil.UInt32ToPointer(uint32(100)),
CPU: k8sutil.StringToPointer("250m"),
},
},
TeamID: "acid",
@ -88,13 +93,16 @@ var (
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-12345", clusterName),
Namespace: namespace,
Annotations: map[string]string{
constants.EventStreamCpuAnnotationKey: "250m",
},
Labels: map[string]string{
"application": "spilo",
"cluster-name": fmt.Sprintf("%s-2", clusterName),
"cluster-name": clusterName,
"team": "acid",
},
OwnerReferences: []metav1.OwnerReference{
metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "StatefulSet",
Name: "acid-test-cluster",
@ -105,7 +113,7 @@ var (
Spec: zalandov1.FabricEventStreamSpec{
ApplicationId: appId,
EventStreams: []zalandov1.EventStream{
zalandov1.EventStream{
{
EventStreamFlow: zalandov1.EventStreamFlow{
PayloadColumn: k8sutil.StringToPointer("b_payload"),
Type: constants.EventStreamFlowPgGenericType,
@ -144,7 +152,7 @@ var (
Type: constants.EventStreamSourcePGType,
},
},
zalandov1.EventStream{
{
EventStreamFlow: zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
},
@ -180,6 +188,37 @@ var (
Type: constants.EventStreamSourcePGType,
},
},
{
EventStreamFlow: zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
},
EventStreamRecovery: zalandov1.EventStreamRecovery{
Type: constants.EventStreamRecoveryIgnoreType,
},
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-c",
MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
Type: constants.EventStreamSinkNakadiType,
},
EventStreamSource: zalandov1.EventStreamSource{
Connection: zalandov1.Connection{
DBAuth: zalandov1.DBAuth{
Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName),
PasswordKey: "password",
Type: constants.EventStreamSourceAuthType,
UserKey: "username",
},
Url: fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser),
SlotName: slotName,
PluginType: constants.EventStreamSourcePluginType,
},
Schema: "data",
EventStreamTable: zalandov1.EventStreamTable{
Name: "foofoobar",
},
Type: constants.EventStreamSourcePGType,
},
},
},
},
}
@ -241,7 +280,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -249,7 +288,7 @@ func TestHasSlotsInSync(t *testing.T) {
},
},
actualSlots: map[string]map[string]string{
slotName: map[string]string{
slotName: {
"databases": dbName,
"plugin": constants.EventStreamSourcePluginType,
"type": "logical",
@ -268,7 +307,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -289,7 +328,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -312,7 +351,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -326,7 +365,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test2": acidv1.StreamTable{
"test2": {
EventType: "stream-type-b",
},
},
@ -334,7 +373,7 @@ func TestHasSlotsInSync(t *testing.T) {
},
},
actualSlots: map[string]map[string]string{
slotName: map[string]string{
slotName: {
"databases": dbName,
"plugin": constants.EventStreamSourcePluginType,
"type": "logical",
@ -353,7 +392,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -367,7 +406,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test2": acidv1.StreamTable{
"test2": {
EventType: "stream-type-b",
},
},
@ -375,7 +414,7 @@ func TestHasSlotsInSync(t *testing.T) {
},
},
actualSlots: map[string]map[string]string{
slotName: map[string]string{
slotName: {
"databases": dbName,
"plugin": constants.EventStreamSourcePluginType,
"type": "logical",
@ -394,7 +433,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test1": acidv1.StreamTable{
"test1": {
EventType: "stream-type-a",
},
},
@ -408,7 +447,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical",
},
Publication: map[string]acidv1.StreamTable{
"test2": acidv1.StreamTable{
"test2": {
EventType: "stream-type-b",
},
},
@ -416,7 +455,7 @@ func TestHasSlotsInSync(t *testing.T) {
},
},
actualSlots: map[string]map[string]string{
slotName: map[string]string{
slotName: {
"databases": dbName,
"plugin": constants.EventStreamSourcePluginType,
"type": "logical",
@ -449,7 +488,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
}
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
LabelSelector: cluster.labelsSet(false).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
@ -488,20 +527,20 @@ func newFabricEventStream(streams []zalandov1.EventStream, annotations map[strin
}
func TestSyncStreams(t *testing.T) {
pg.Name = fmt.Sprintf("%s-2", pg.Name)
newClusterName := fmt.Sprintf("%s-2", pg.Name)
pg.Name = newClusterName
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
EnableOwnerReferences: util.True(),
PodRoleLabel: "spilo-role",
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
},
},
}, client, pg, logger, eventRecorder)
@ -514,39 +553,23 @@ func TestSyncStreams(t *testing.T) {
err = cluster.syncStream(appId)
assert.NoError(t, err)
// create a second stream with same spec but with different name
createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err)
assert.Equal(t, createdStream.Spec.ApplicationId, appId)
// check that two streams exist
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items))
// sync the stream which should remove the redundant stream
// sync the stream again
err = cluster.syncStream(appId)
assert.NoError(t, err)
// check that only one stream remains after sync
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(false).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
// check owner references
if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
}
}
func TestSameStreams(t *testing.T) {
testName := "TestSameStreams"
annotationsA := map[string]string{"owned-by": "acid"}
annotationsB := map[string]string{"owned-by": "foo"}
annotationsA := map[string]string{constants.EventStreamMemoryAnnotationKey: "500Mi"}
annotationsB := map[string]string{constants.EventStreamMemoryAnnotationKey: "1Gi"}
stream1 := zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{},
@ -615,42 +638,49 @@ func TestSameStreams(t *testing.T) {
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
match: false,
reason: "number of defined streams is different",
reason: "new streams EventStreams array does not match : number of defined streams is different",
},
{
subTest: "different number of streams",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
match: false,
reason: "number of defined streams is different",
reason: "new streams EventStreams array does not match : number of defined streams is different",
},
{
subTest: "event stream specs differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
streamsB: fes,
match: false,
reason: "number of defined streams is different",
reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_CPU\" with value \"250m\"., new streams labels do not match the current ones, new streams EventStreams array does not match : number of defined streams is different",
},
{
subTest: "event stream recovery specs differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil),
match: false,
reason: "event stream specs differ",
reason: "new streams EventStreams array does not match : event stream specs differ",
},
{
subTest: "event stream with new annotations",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
match: false,
reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_MEMORY\" with value \"500Mi\".",
},
{
subTest: "event stream annotations differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
streamsA: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsA),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB),
match: false,
reason: "event stream specs differ",
reason: "new streams annotations do not match: \"fes.zalando.org/FES_MEMORY\" changed from \"500Mi\" to \"1Gi\".",
},
}
for _, tt := range tests {
streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB)
if streamsMatch != tt.match {
t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s",
if streamsMatch != tt.match || matchReason != tt.reason {
t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s",
testName, tt.subTest, matchReason, tt.reason)
}
}
@ -658,6 +688,105 @@ func TestSameStreams(t *testing.T) {
func TestUpdateStreams(t *testing.T) {
pg.Name = fmt.Sprintf("%s-3", pg.Name)
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
EnableOwnerReferences: util.True(),
PodRoleLabel: "spilo-role",
},
},
}, client, pg, logger, eventRecorder)
_, err := cluster.KubeClient.Postgresqls(namespace).Create(
context.TODO(), &pg, metav1.CreateOptions{})
assert.NoError(t, err)
// create stream with different owner reference
fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
fes.ObjectMeta.Labels["cluster-name"] = pg.Name
createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err)
assert.Equal(t, createdStream.Spec.ApplicationId, appId)
// sync the stream which should update the owner reference
err = cluster.syncStream(appId)
assert.NoError(t, err)
// check that only one stream exists after sync
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
// compare owner references
if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
}
// change specs of streams and patch CRD
for i, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId {
streamTable := stream.Tables["data.bar"]
streamTable.EventType = "stream-type-c"
stream.Tables["data.bar"] = streamTable
stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250))
pg.Spec.Streams[i] = stream
}
}
// compare stream returned from API with expected stream
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result := cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
}
// disable recovery
for idx, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId {
stream.EnableRecovery = util.False()
pg.Spec.Streams[idx] = stream
}
}
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result = cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
}
}
func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
patchData, err := specPatch(pgSpec)
assert.NoError(t, err)
pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
assert.NoError(t, err)
cluster.Postgresql.Spec = pgPatched.Spec
err = cluster.syncStream(appId)
assert.NoError(t, err)
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
return streams
}
func TestDeleteStreams(t *testing.T) {
pg.Name = fmt.Sprintf("%s-4", pg.Name)
var cluster = New(
Config{
OpConfig: config.Config{
@ -695,7 +824,7 @@ func TestUpdateStreams(t *testing.T) {
// compare stream returned from API with expected stream
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
LabelSelector: cluster.labelsSet(false).String(),
}
streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result := cluster.generateFabricEventStream(appId)
@ -703,6 +832,14 @@ func TestUpdateStreams(t *testing.T) {
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
}
// change teamId and check that stream is updated
pg.Spec.TeamID = "new-team"
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result = cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after updating teamId, expected %#v, got %#v", streams.Items[0].ObjectMeta.Labels, result.ObjectMeta.Labels)
}
// disable recovery
for idx, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId {
@ -717,9 +854,6 @@ func TestUpdateStreams(t *testing.T) {
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
}
mockClient := k8sutil.NewMockKubernetesClient()
cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
// remove streams from manifest
pg.Spec.Streams = nil
pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update(
@ -729,26 +863,29 @@ func TestUpdateStreams(t *testing.T) {
appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams)
cluster.cleanupRemovedStreams(appIds)
// check that streams have been deleted
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
if len(streams.Items) > 0 || err != nil {
t.Errorf("stream resource has not been removed or unexpected error %v", err)
}
}
assert.NoError(t, err)
assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
patchData, err := specPatch(pgSpec)
// create stream to test deleteStreams code
fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
fes.ObjectMeta.Labels["cluster-name"] = pg.Name
_, err = cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err)
pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
assert.NoError(t, err)
cluster.Postgresql.Spec = pgPatched.Spec
// sync it once to cluster struct
err = cluster.syncStream(appId)
assert.NoError(t, err)
// we need a mock client because deleteStreams checks for CRD existence
mockClient := k8sutil.NewMockKubernetesClient()
cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
cluster.deleteStreams()
// check that streams have been deleted
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
return streams
assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
}

View File

@ -300,6 +300,7 @@ func (c *Cluster) syncPatroniService() error {
err error
)
serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni)
c.logger.Debugf("syncing %s service", serviceName)
c.setProcessName("syncing %s service", serviceName)
if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil {
@ -311,7 +312,7 @@ func (c *Cluster) syncPatroniService() error {
c.setProcessName("updating %v service", serviceName)
svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("could not update %s endpoint: %v", serviceName, err)
return fmt.Errorf("could not update %s service: %v", serviceName, err)
}
c.Services[Patroni] = svc
}
@ -537,7 +538,7 @@ func (c *Cluster) syncStatefulSet() error {
if err != nil {
return fmt.Errorf("could not generate statefulset: %v", err)
}
c.logger.Debugf("syncing statefulsets")
c.logger.Debug("syncing statefulsets")
// check if there are still pods with a rolling update flag
for _, pod := range pods {
if c.getRollingUpdateFlagFromPod(&pod) {
@ -552,7 +553,7 @@ func (c *Cluster) syncStatefulSet() error {
}
if len(podsToRecreate) > 0 {
c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
c.logger.Infof("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
}
// statefulset is already there, make sure we use its definition in order to compare with the spec.
@ -658,7 +659,7 @@ func (c *Cluster) syncStatefulSet() error {
// statefulset or those that got their configuration from the outdated statefulset)
if len(podsToRecreate) > 0 {
if isSafeToRecreatePods {
c.logger.Debugln("performing rolling update")
c.logger.Info("performing rolling update")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil {
return fmt.Errorf("could not recreate pods: %v", err)
@ -971,7 +972,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
// carries the request to change configuration through
for _, pod := range pods {
podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
c.logger.Infof("patching Postgres config via Patroni API on pod %s with following options: %s",
podName, standbyOptionsToSet)
if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil {
return nil
@ -983,7 +984,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
}
func (c *Cluster) syncSecrets() error {
c.logger.Info("syncing secrets")
c.logger.Debug("syncing secrets")
c.setProcessName("syncing secrets")
generatedSecrets := c.generateUserSecrets()
retentionUsers := make([]string, 0)
@ -993,7 +994,7 @@ func (c *Cluster) syncSecrets() error {
secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
if err == nil {
c.Secrets[secret.UID] = secret
c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
continue
}
if k8sutil.ResourceAlreadyExists(err) {
@ -1134,7 +1135,7 @@ func (c *Cluster) updateSecret(
}
if updateSecret {
c.logger.Debugln(updateSecretMsg)
c.logger.Infof(updateSecretMsg)
if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("could not update secret %s: %v", secretName, err)
}

View File

@ -644,7 +644,7 @@ func TestUpdateSecret(t *testing.T) {
ApplicationId: appId,
Database: dbname,
Tables: map[string]acidv1.StreamTable{
"data.foo": acidv1.StreamTable{
"data.foo": {
EventType: "stream-type-b",
},
},

View File

@ -193,7 +193,7 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) {
nice := nicediff.Diff(string(o), string(n), true)
for _, s := range strings.Split(nice, "\n") {
// " is not needed in the value to understand
log.Debugf(strings.ReplaceAll(s, "\"", ""))
log.Debug(strings.ReplaceAll(s, "\"", ""))
}
}
@ -209,7 +209,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate b
logNiceDiff(c.logger, old.Spec, new.Spec)
if !reflect.DeepEqual(old.Annotations, new.Annotations) {
c.logger.Debugf("metadata.annotation are different")
c.logger.Debug("metadata.annotation are different")
logNiceDiff(c.logger, old.Annotations, new.Annotations)
}
@ -280,7 +280,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
}
if !c.OpConfig.EnableTeamsAPI {
c.logger.Debugf("team API is disabled")
c.logger.Debug("team API is disabled")
return members, nil
}
@ -416,7 +416,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
podsNumber = len(pods.Items)
c.logger.Debugf("Waiting for %d pods to become ready", podsNumber)
} else {
c.logger.Debugf("Waiting for any replica pod to become ready")
c.logger.Debug("Waiting for any replica pod to become ready")
}
err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,

View File

@ -13,9 +13,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/filesystems"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/volumes"
)
@ -66,7 +66,7 @@ func (c *Cluster) syncVolumes() error {
}
func (c *Cluster) syncUnderlyingEBSVolume() error {
c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
c.logger.Debug("starting to sync EBS volumes: type, iops, throughput, and size")
var (
err error
@ -136,7 +136,7 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
}
func (c *Cluster) populateVolumeMetaData() error {
c.logger.Infof("starting reading ebs meta data")
c.logger.Debug("starting reading ebs meta data")
pvs, err := c.listPersistentVolumes()
if err != nil {
@ -151,7 +151,7 @@ func (c *Cluster) populateVolumeMetaData() error {
volumeIds := []string{}
var volumeID string
for _, pv := range pvs {
volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
volumeID, err = c.VolumeResizer.GetProviderVolumeID(pv)
if err != nil {
continue
}
@ -165,7 +165,7 @@ func (c *Cluster) populateVolumeMetaData() error {
}
if len(currentVolumes) != len(c.EBSVolumes) && len(c.EBSVolumes) > 0 {
c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
c.logger.Infof("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
}
// reset map, operator is not responsible for dangling ebs volumes
@ -185,7 +185,7 @@ func (c *Cluster) syncVolumeClaims() error {
if c.OpConfig.StorageResizeMode == "off" || c.OpConfig.StorageResizeMode == "ebs" {
ignoreResize = true
c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of PVCs.", c.OpConfig.StorageResizeMode)
c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of persistent volume claims.", c.OpConfig.StorageResizeMode)
}
newSize, err := resource.ParseQuantity(c.Spec.Volume.Size)
@ -196,27 +196,30 @@ func (c *Cluster) syncVolumeClaims() error {
pvcs, err := c.listPersistentVolumeClaims()
if err != nil {
return fmt.Errorf("could not receive persistent volume claims: %v", err)
return fmt.Errorf("could not list persistent volume claims: %v", err)
}
for _, pvc := range pvcs {
c.VolumeClaims[pvc.UID] = &pvc
needsUpdate := false
currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
if !ignoreResize && currentSize != manifestSize {
if currentSize < manifestSize {
pvc.Spec.Resources.Requests[v1.ResourceStorage] = newSize
needsUpdate = true
c.logger.Debugf("persistent volume claim for volume %q needs to be resized", pvc.Name)
c.logger.Infof("persistent volume claim for volume %q needs to be resized", pvc.Name)
} else {
c.logger.Warningf("cannot shrink persistent volume")
}
}
if needsUpdate {
c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name)
if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil {
c.logger.Infof("updating persistent volume claim definition for volume %q", pvc.Name)
updatedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("could not update persistent volume claim: %q", err)
}
c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name)
c.VolumeClaims[pvc.UID] = updatedPvc
c.logger.Infof("successfully updated persistent volume claim %q", pvc.Name)
} else {
c.logger.Debugf("volume claim for volume %q do not require updates", pvc.Name)
}
@ -227,14 +230,15 @@ func (c *Cluster) syncVolumeClaims() error {
if err != nil {
return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err)
}
_, err = c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
patchedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("could not patch annotations of the persistent volume claim for volume %q: %v", pvc.Name, err)
}
c.VolumeClaims[pvc.UID] = patchedPvc
}
}
c.logger.Infof("volume claims have been synced successfully")
c.logger.Debug("volume claims have been synced successfully")
return nil
}
@ -255,7 +259,7 @@ func (c *Cluster) syncEbsVolumes() error {
return fmt.Errorf("could not sync volumes: %v", err)
}
c.logger.Infof("volumes have been synced successfully")
c.logger.Debug("volumes have been synced successfully")
return nil
}
@ -268,38 +272,50 @@ func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, erro
pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(context.TODO(), listOptions)
if err != nil {
return nil, fmt.Errorf("could not list of PersistentVolumeClaims: %v", err)
return nil, fmt.Errorf("could not list persistent volume claims: %v", err)
}
return pvcs.Items, nil
}
func (c *Cluster) deletePersistentVolumeClaims() error {
c.logger.Debugln("deleting PVCs")
pvcs, err := c.listPersistentVolumeClaims()
if err != nil {
return err
}
for _, pvc := range pvcs {
c.logger.Debugf("deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta))
if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions); err != nil {
c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
c.setProcessName("deleting persistent volume claims")
errors := make([]string, 0)
for uid := range c.VolumeClaims {
err := c.deletePersistentVolumeClaim(uid)
if err != nil {
errors = append(errors, fmt.Sprintf("%v", err))
}
}
if len(pvcs) > 0 {
c.logger.Debugln("PVCs have been deleted")
} else {
c.logger.Debugln("no PVCs to delete")
if len(errors) > 0 {
c.logger.Warningf("could not delete all persistent volume claims: %v", strings.Join(errors, `', '`))
}
return nil
}
func (c *Cluster) deletePersistentVolumeClaim(uid types.UID) error {
c.setProcessName("deleting persistent volume claim")
pvc := c.VolumeClaims[uid]
c.logger.Debugf("deleting persistent volume claim %q", pvc.Name)
err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions)
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("persistent volume claim %q has already been deleted", pvc.Name)
} else if err != nil {
return fmt.Errorf("could not delete persistent volume claim %q: %v", pvc.Name, err)
}
c.logger.Infof("persistent volume claim %q has been deleted", pvc.Name)
delete(c.VolumeClaims, uid)
return nil
}
func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
result := make([]*v1.PersistentVolume, 0)
pvcs, err := c.listPersistentVolumeClaims()
if err != nil {
return nil, fmt.Errorf("could not list cluster's PersistentVolumeClaims: %v", err)
return nil, fmt.Errorf("could not list cluster's persistent volume claims: %v", err)
}
pods, err := c.listPods()
@ -382,22 +398,22 @@ func (c *Cluster) resizeVolumes() error {
if err != nil {
return err
}
c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize)
c.logger.Infof("updating persistent volume %q to %d", pv.Name, newSize)
if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil {
return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err)
}
c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name)
c.logger.Infof("resizing the filesystem on the volume %q", pv.Name)
podName := getPodNameFromPersistentVolume(pv)
if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil {
return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err)
}
c.logger.Debugf("filesystem resize successful on volume %q", pv.Name)
c.logger.Infof("filesystem resize successful on volume %q", pv.Name)
pv.Spec.Capacity[v1.ResourceStorage] = newQuantity
c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name)
c.logger.Infof("updating persistent volume definition for volume %q", pv.Name)
if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("could not update persistent volume: %q", err)
}
c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
c.logger.Infof("successfully updated persistent volume %q", pv.Name)
if !compatible {
c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name)
@ -458,7 +474,7 @@ func (c *Cluster) executeEBSMigration() error {
}
if !hasGp2 {
c.logger.Infof("no EBS gp2 volumes left to migrate")
c.logger.Debugf("no EBS gp2 volumes left to migrate")
return nil
}
}
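// Editor's illustrative sketch (not part of the diff): the claim sync above only
// ever grows a volume: when the size in the manifest exceeds the current request
// the claim is updated and stored back into c.VolumeClaims, while shrinking is
// refused with a warning. A reduced version of that decision, ignoring the
// rounding done by quantityToGigabyte:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// decide returns what syncVolumeClaims would do for a current request and the
// size taken from the manifest (quantity strings, e.g. "1Gi").
func decide(current, manifest string) string {
	cur := resource.MustParse(current)
	want := resource.MustParse(manifest)
	switch cur.Cmp(want) {
	case -1:
		return "resize" // current < manifest: update the claim
	case 1:
		return "shrink-refused" // cannot shrink persistent volume
	default:
		return "noop" // sizes already match
	}
}

func main() {
	fmt.Println(decide("1Gi", "2Gi")) // resize
	fmt.Println(decide("2Gi", "1Gi")) // shrink-refused
	fmt.Println(decide("1Gi", "1Gi")) // noop
}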

View File

@ -93,7 +93,7 @@ func TestResizeVolumeClaim(t *testing.T) {
// check if listPersistentVolumeClaims returns only the PVCs matching the filter
if len(pvcs) != len(pvcList.Items)-1 {
t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
t.Errorf("%s: could not find all persistent volume claims, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
}
// check if PVCs were correctly resized
@ -165,7 +165,7 @@ func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int,
Labels: labels,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: storage1Gi,
},
@ -216,6 +216,12 @@ func TestMigrateEBS(t *testing.T) {
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
}).
Times(2)
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
[]volumes.VolumeProperties{
{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100},
@ -256,7 +262,7 @@ func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustern
Labels: labels,
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: storage1Gi,
},
@ -322,6 +328,12 @@ func TestMigrateGp3Support(t *testing.T) {
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil)
resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
}).
Times(3)
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return(
[]volumes.VolumeProperties{
{VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000},
@ -377,6 +389,12 @@ func TestManualGp2Gp3Support(t *testing.T) {
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
}).
Times(2)
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
[]volumes.VolumeProperties{
{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},
@ -436,6 +454,12 @@ func TestDontTouchType(t *testing.T) {
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
}).
Times(2)
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
[]volumes.VolumeProperties{
{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},

View File

@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
result.EtcdHost = fromCRD.EtcdHost
result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.3-p1")
result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
result.MinInstances = fromCRD.MinInstances
result.MaxInstances = fromCRD.MaxInstances
@ -62,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
// major version upgrade config
result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual")
result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12")
result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16")
result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13")
result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17")
// kubernetes config
result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False())
@ -180,7 +180,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
// logical backup config
result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0")
result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0")
result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
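
The new image and version defaults above only take effect when the corresponding CRD field is empty; a minimal sketch of that fallback pattern, assuming util.Coalesce simply returns its first non-empty argument (the helper below is illustrative, not the operator's implementation):

package config

// coalesce mirrors the assumed behaviour of util.Coalesce: prefer the value
// set in the OperatorConfiguration CRD, otherwise fall back to the built-in default.
func coalesce(fromCRD, defaultValue string) string {
	if fromCRD != "" {
		return fromCRD
	}
	return defaultValue
}

// Example: an unset docker_image field resolves to the new default image:
// coalesce("", "ghcr.io/zalando/spilo-17:4.0-p2") == "ghcr.io/zalando/spilo-17:4.0-p2"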

View File

@ -143,7 +143,7 @@ func (c *Controller) acquireInitialListOfClusters() error {
if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil {
return err
}
c.logger.Debugf("acquiring initial list of clusters")
c.logger.Debug("acquiring initial list of clusters")
for _, pg := range list.Items {
// XXX: check the cluster status field instead
if pg.Error != "" {

View File

@ -76,9 +76,8 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err)
}
} else {
c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
}
c.logger.Infof("customResourceDefinition %q is registered", crd.Name)
return wait.PollUntilContextTimeout(context.TODO(), c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, false, func(ctx context.Context) (bool, error) {
c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{})

View File

@ -132,7 +132,7 @@ func TestOldInfrastructureRoleFormat(t *testing.T) {
for _, test := range testTable {
roles, err := utilTestController.getInfrastructureRoles(
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: test.secretName,
UserKey: "user",
PasswordKey: "password",
@ -163,7 +163,7 @@ func TestNewInfrastructureRoleFormat(t *testing.T) {
// one secret with one configmap
{
[]spec.NamespacedName{
spec.NamespacedName{
{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
},
@ -187,11 +187,11 @@ func TestNewInfrastructureRoleFormat(t *testing.T) {
// multiple standalone secrets
{
[]spec.NamespacedName{
spec.NamespacedName{
{
Namespace: v1.NamespaceDefault,
Name: "infrastructureroles-new-test1",
},
spec.NamespacedName{
{
Namespace: v1.NamespaceDefault,
Name: "infrastructureroles-new-test2",
},
@ -248,7 +248,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
// only new CRD format
{
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -262,7 +262,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
spec.NamespacedName{},
"",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -280,7 +280,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
spec.NamespacedName{},
"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -298,7 +298,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
spec.NamespacedName{},
"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -319,7 +319,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
},
"",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesOldSecretName,
@ -334,7 +334,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
// both formats for CRD
{
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -351,7 +351,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
},
"",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -361,7 +361,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
RoleKey: "test-role",
Template: false,
},
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesOldSecretName,
@ -382,7 +382,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
},
"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
[]*config.InfrastructureRole{
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesNewSecretName,
@ -392,7 +392,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
RoleKey: "test-role",
Template: false,
},
&config.InfrastructureRole{
{
SecretName: spec.NamespacedName{
Namespace: v1.NamespaceDefault,
Name: testInfrastructureRolesOldSecretName,

View File

@ -122,6 +122,9 @@ type ControllerConfig struct {
IgnoredAnnotations []string
EnableJsonLogging bool
KubeQPS int
KubeBurst int
}
// cached value for the GetOperatorNamespace
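
The new KubeQPS and KubeBurst fields carry client-side rate-limiting settings for the Kubernetes client. A hedged sketch of how such values are typically applied to a client-go rest.Config before building a clientset (illustrative wiring only, not necessarily how the controller consumes them):

package k8sclient

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newClientset shows the usual client-go pattern: QPS caps the sustained
// request rate against the API server, Burst allows short spikes above it.
func newClientset(cfg *rest.Config, qps, burst int) (*kubernetes.Clientset, error) {
	cfg.QPS = float32(qps)
	cfg.Burst = burst
	return kubernetes.NewForConfig(cfg)
}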

View File

@ -127,7 +127,7 @@ type Scalyr struct {
// LogicalBackup defines configuration for logical backup
type LogicalBackup struct {
LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"`
LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"`
LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"`
LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"`
LogicalBackupAzureStorageAccountName string `name:"logical_backup_azure_storage_account_name" default:""`
LogicalBackupAzureStorageContainer string `name:"logical_backup_azure_storage_container" default:""`
@ -175,7 +175,7 @@ type Config struct {
WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-16:3.3-p1"`
DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p2"`
SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
SidecarContainers []v1.Container `name:"sidecars"`
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
@ -246,8 +246,8 @@ type Config struct {
EnableTeamIdClusternamePrefix bool `name:"enable_team_id_clustername_prefix" default:"false"`
MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"manual"`
MajorVersionUpgradeTeamAllowList []string `name:"major_version_upgrade_team_allow_list" default:""`
MinimalMajorVersion string `name:"minimal_major_version" default:"12"`
TargetMajorVersion string `name:"target_major_version" default:"16"`
MinimalMajorVersion string `name:"minimal_major_version" default:"13"`
TargetMajorVersion string `name:"target_major_version" default:"17"`
PatroniAPICheckInterval time.Duration `name:"patroni_api_check_interval" default:"1s"`
PatroniAPICheckTimeout time.Duration `name:"patroni_api_check_timeout" default:"5s"`
EnablePatroniFailsafeMode *bool `name:"enable_patroni_failsafe_mode" default:"false"`

View File

@ -7,6 +7,7 @@ const (
// EBS related constants
EBSVolumeIDStart = "/vol-"
EBSProvisioner = "kubernetes.io/aws-ebs"
EBSDriver = "ebs.csi.aws.com"
//https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VolumeModification.html
EBSVolumeStateModifying = "modifying"
EBSVolumeStateOptimizing = "optimizing"

View File

@ -2,16 +2,19 @@ package constants
// PostgreSQL specific constants
const (
EventStreamCRDApiVersion = "zalando.org/v1"
EventStreamCRDKind = "FabricEventStream"
EventStreamCRDName = "fabriceventstreams.zalando.org"
EventStreamSourcePGType = "PostgresLogicalReplication"
EventStreamSourceSlotPrefix = "fes"
EventStreamSourcePluginType = "pgoutput"
EventStreamSourceAuthType = "DatabaseAuthenticationSecret"
EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent"
EventStreamSinkNakadiType = "Nakadi"
EventStreamRecoveryNoneType = "None"
EventStreamRecoveryDLQType = "DeadLetter"
EventStreamRecoverySuffix = "dead-letter-queue"
EventStreamCRDApiVersion = "zalando.org/v1"
EventStreamCRDKind = "FabricEventStream"
EventStreamCRDName = "fabriceventstreams.zalando.org"
EventStreamSourcePGType = "PostgresLogicalReplication"
EventStreamSourceSlotPrefix = "fes"
EventStreamSourcePluginType = "pgoutput"
EventStreamSourceAuthType = "DatabaseAuthenticationSecret"
EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent"
EventStreamSinkNakadiType = "Nakadi"
EventStreamRecoveryDLQType = "DeadLetter"
EventStreamRecoveryIgnoreType = "Ignore"
EventStreamRecoveryNoneType = "None"
EventStreamRecoverySuffix = "dead-letter-queue"
EventStreamCpuAnnotationKey = "fes.zalando.org/FES_CPU"
EventStreamMemoryAnnotationKey = "fes.zalando.org/FES_MEMORY"
)

View File

@ -7,8 +7,6 @@ import (
b64 "encoding/base64"
"encoding/json"
clientbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
zalandoclient "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
@ -24,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
policyv1 "k8s.io/client-go/kubernetes/typed/policy/v1"
rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
@ -59,9 +58,9 @@ type KubernetesClient struct {
appsv1.StatefulSetsGetter
appsv1.DeploymentsGetter
rbacv1.RoleBindingsGetter
batchv1.CronJobsGetter
policyv1.PodDisruptionBudgetsGetter
apiextv1client.CustomResourceDefinitionsGetter
clientbatchv1.CronJobsGetter
acidv1.OperatorConfigurationsGetter
acidv1.PostgresTeamsGetter
acidv1.PostgresqlsGetter
@ -373,7 +372,7 @@ func (mock *mockDeployment) Get(ctx context.Context, name string, opts metav1.Ge
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
v1.Container{
{
Image: "pooler:1.0",
},
},

View File

@ -24,7 +24,7 @@ const (
doBlockStmt = `SET LOCAL synchronous_commit = 'local'; DO $$ BEGIN %s; END;$$;`
passwordTemplate = "ENCRYPTED PASSWORD '%s'"
inRoleTemplate = `IN ROLE %s`
adminTemplate = `ADMIN %s`
adminTemplate = `ADMIN "%s"`
)
// DefaultUserSyncStrategy implements a user sync strategy that merges already existing database users
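
Quoting the ADMIN clause presumably protects admin role names that need identifier quoting (mixed case, hyphens); a small sketch of the rendered SQL fragment, using an invented role name for illustration:

package main

import "fmt"

func main() {
	adminTemplate := `ADMIN "%s"`
	// A hyphenated or mixed-case role stays one quoted identifier
	// instead of being case-folded or split by the SQL parser.
	fmt.Printf(adminTemplate+"\n", "foo-team-admin") // ADMIN "foo-team-admin"
}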

View File

@ -36,7 +36,8 @@ func (r *EBSVolumeResizer) IsConnectedToProvider() bool {
// VolumeBelongsToProvider checks if the given persistent volume is backed by EBS.
func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool {
return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner
return (pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner) ||
(pv.Spec.CSI != nil && pv.Spec.CSI.Driver == constants.EBSDriver)
}
// ExtractVolumeID extracts volumeID from "aws://eu-central-1a/vol-075ddfc4a127d0bd4"
@ -54,7 +55,12 @@ func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {
// GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes
func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) {
volumeID := pv.Spec.AWSElasticBlockStore.VolumeID
var volumeID string = ""
if pv.Spec.CSI != nil {
volumeID = pv.Spec.CSI.VolumeHandle
} else if pv.Spec.AWSElasticBlockStore != nil {
volumeID = pv.Spec.AWSElasticBlockStore.VolumeID
}
if volumeID == "" {
return "", fmt.Errorf("got empty volume id for volume %v", pv)
}

View File

@ -0,0 +1,123 @@
package volumes
import (
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGetProviderVolumeID(t *testing.T) {
tests := []struct {
name string
pv *v1.PersistentVolume
expected string
err error
}{
{
name: "CSI volume handle",
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
VolumeHandle: "vol-075ddfc4a127d0bd5",
},
},
},
},
expected: "vol-075ddfc4a127d0bd5",
err: nil,
},
{
name: "AWS EBS volume handle",
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4",
},
},
},
},
expected: "vol-075ddfc4a127d0bd4",
err: nil,
},
{
name: "Empty volume handle",
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{},
},
expected: "",
err: fmt.Errorf("got empty volume id for volume %v", &v1.PersistentVolume{}),
},
}
resizer := EBSVolumeResizer{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
volumeID, err := resizer.GetProviderVolumeID(tt.pv)
if volumeID != tt.expected || (err != nil && err.Error() != tt.err.Error()) {
t.Errorf("expected %v, got %v, expected err %v, got %v", tt.expected, volumeID, tt.err, err)
}
})
}
}
func TestVolumeBelongsToProvider(t *testing.T) {
tests := []struct {
name string
pv *v1.PersistentVolume
expected bool
}{
{
name: "CSI volume handle",
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: "ebs.csi.aws.com",
VolumeHandle: "vol-075ddfc4a127d0bd5",
},
},
},
},
expected: true,
},
{
name: "AWS EBS volume handle",
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string {
"pv.kubernetes.io/provisioned-by": "kubernetes.io/aws-ebs",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4",
},
},
},
},
expected: true,
},
{
name: "Empty volume source",
pv: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{},
},
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
resizer := EBSVolumeResizer{}
isProvider := resizer.VolumeBelongsToProvider(tt.pv)
if isProvider != tt.expected {
t.Errorf("expected %v, got %v", tt.expected, isProvider)
}
})
}
}

View File

@ -1,6 +1,6 @@
{
"name": "postgres-operator-ui",
"version": "1.13.0",
"version": "1.14.0",
"description": "PostgreSQL Operator UI",
"main": "src/app.js",
"config": {

View File

@ -18,7 +18,7 @@ spec:
serviceAccountName: postgres-operator-ui
containers:
- name: "service"
image: ghcr.io/zalando/postgres-operator-ui:v1.13.0
image: ghcr.io/zalando/postgres-operator-ui:v1.14.0
ports:
- containerPort: 8081
protocol: "TCP"
@ -73,11 +73,11 @@ spec:
"limit_iops": 16000,
"limit_throughput": 1000,
"postgresql_versions": [
"17",
"16",
"15",
"14",
"13",
"12"
"13"
]
}
# Example of settings to make the snapshot view work in the UI when using AWS

View File

@ -267,7 +267,7 @@ DEFAULT_UI_CONFIG = {
'users_visible': True,
'databases_visible': True,
'resources_visible': RESOURCES_VISIBLE,
'postgresql_versions': ['12', '13', '14', '15', '16'],
'postgresql_versions': ['13', '14', '15', '16', '17'],
'dns_format_string': '{0}.{1}',
'pgui_link': '',
'static_network_whitelist': {},

View File

@ -305,7 +305,7 @@ def read_versions(
if uid == 'wal' or defaulting(lambda: UUID(uid))
]
BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/']
BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/', '17/']
def read_basebackups(
pg_cluster,

View File

@ -12,4 +12,4 @@ python-json-logger==2.0.7
requests==2.32.2
stups-tokens>=1.1.19
wal_e==1.1.1
werkzeug==3.0.3
werkzeug==3.0.6

View File

@ -31,11 +31,11 @@ default_operator_ui_config='{
"limit_iops": 16000,
"limit_throughput": 1000,
"postgresql_versions": [
"17",
"16",
"15",
"14",
"13",
"12"
"13"
],
"static_network_whitelist": {
"localhost": ["172.0.0.1/32"]