Merge branch 'master' into patroni-4-integration

commit efff3f8c56
@@ -23,7 +23,7 @@ jobs:

      - uses: actions/setup-go@v2
        with:
-          go-version: "^1.22.5"
+          go-version: "^1.23.4"

      - name: Run unit tests
        run: make deps mocks test
@@ -14,7 +14,7 @@ jobs:
      - uses: actions/checkout@v1
      - uses: actions/setup-go@v2
        with:
-          go-version: "^1.22.5"
+          go-version: "^1.23.4"
      - name: Make dependencies
        run: make deps mocks
      - name: Code generation
@@ -14,7 +14,7 @@ jobs:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
-          go-version: "^1.22.5"
+          go-version: "^1.23.4"
      - name: Make dependencies
        run: make deps mocks
      - name: Compile
@@ -22,7 +22,7 @@ jobs:
      - name: Run unit tests
        run: go test -race -covermode atomic -coverprofile=coverage.out ./...
      - name: Convert coverage to lcov
-        uses: jandelgado/gcov2lcov-action@v1.0.9
+        uses: jandelgado/gcov2lcov-action@v1.1.1
      - name: Coveralls
        uses: coverallsapp/github-action@master
        with:
@@ -104,3 +104,5 @@ e2e/tls
 mocks

 ui/.npm/
+
+.DS_Store
Makefile (4 changed lines)
@@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
 	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .

 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.5 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux"

 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
@@ -78,7 +78,7 @@ mocks:
 	GO111MODULE=on go generate ./...

 tools:
-	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.30.4
+	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy
@@ -28,13 +28,13 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as

 ### PostgreSQL features

-* Supports PostgreSQL 16, starting from 12+
+* Supports PostgreSQL 17, starting from 13+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
-  [pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) /
+  [pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) /
   [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
 * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
-  [pg_stat_statements](https://www.postgresql.org/docs/16/pgstatstatements.html),
+  [pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html),
   [pgextwlist](https://github.com/dimitri/pgextwlist),
   [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
 * Incl. popular Postgres extensions such as
@@ -57,12 +57,12 @@ production for over five years.

 | Release   | Postgres versions | K8s versions | Golang |
 | :-------- | :---------------: | :----------: | :----: |
+| v1.14.0   | 13 → 17           | 1.27+        | 1.23.4 |
 | v1.13.0   | 12 → 16           | 1.27+        | 1.22.5 |
 | v1.12.0   | 11 → 16           | 1.27+        | 1.22.3 |
 | v1.11.0   | 11 → 16           | 1.27+        | 1.21.7 |
 | v1.10.1   | 10 → 15           | 1.21+        | 1.19.8 |
 | v1.9.0    | 10 → 15           | 1.21+        | 1.18.9 |
-| v1.8.2    | 9.5 → 14          | 1.20 → 1.24  | 1.17.4 |

 ## Getting started
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.13.0
-appVersion: 1.13.0
+version: 1.14.0
+appVersion: 1.14.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,32 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v2
+    appVersion: 1.14.0
+    created: "2024-12-23T11:26:07.721761867+01:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.14.0.tgz
+    version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2024-08-21T18:55:36.524305158+02:00"
+    created: "2024-12-23T11:26:07.719409282+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8

@@ -26,7 +49,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-08-21T18:55:36.521875733+02:00"
+    created: "2024-12-23T11:26:07.717202918+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd

@@ -49,7 +72,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-08-21T18:55:36.51959105+02:00"
+    created: "2024-12-23T11:26:07.714792146+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2

@@ -72,7 +95,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-08-21T18:55:36.516518177+02:00"
+    created: "2024-12-23T11:26:07.712194397+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce

@@ -95,7 +118,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2024-08-21T18:55:36.52712908+02:00"
+    created: "2024-12-23T11:26:07.723891496+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc

@@ -116,4 +139,4 @@ entries:
     urls:
     - postgres-operator-ui-1.9.0.tgz
     version: 1.9.0
-generated: "2024-08-21T18:55:36.512456099+02:00"
+generated: "2024-12-23T11:26:07.709192608+01:00"

Binary file not shown.
@@ -9,7 +9,7 @@ metadata:
   name: {{ template "postgres-operator-ui.fullname" . }}
   namespace: {{ .Release.Namespace }}
 spec:
-  replicas: 1
+  replicas: {{ .Values.replicaCount }}
   selector:
     matchLabels:
       app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
@@ -84,11 +84,11 @@ spec:
               "limit_iops": 16000,
               "limit_throughput": 1000,
               "postgresql_versions": [
+                "17",
                 "16",
                 "15",
                 "14",
-                "13",
-                "12"
+                "13"
               ]
             }
           {{- if .Values.extraEnvs }}
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator-ui
-  tag: v1.13.0
+  tag: v1.14.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
 version: 1.14.0
-appVersion: 1.13.0
+appVersion: 1.14.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -68,7 +68,7 @@ spec:
                 type: string
               docker_image:
                 type: string
-                default: "ghcr.io/zalando/spilo-16:3.3-p1"
+                default: "ghcr.io/zalando/spilo-17:4.0-p2"
               enable_crd_registration:
                 type: boolean
                 default: true
@@ -167,10 +167,10 @@ spec:
                 type: string
               minimal_major_version:
                 type: string
-                default: "12"
+                default: "13"
               target_major_version:
                 type: string
-                default: "16"
+                default: "17"
         kubernetes:
           type: object
           properties:
@@ -375,11 +375,11 @@ spec:
           version:
             type: string
             enum:
-              - "12"
               - "13"
               - "14"
               - "15"
               - "16"
+              - "17"
           parameters:
             type: object
             additionalProperties:
@@ -514,6 +514,9 @@ spec:
                     type: string
                 batchSize:
                   type: integer
+                cpu:
+                  type: string
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 database:
                   type: string
                 enableRecovery:

@@ -522,6 +525,9 @@ spec:
                   type: object
                   additionalProperties:
                     type: string
+                memory:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 tables:
                   type: object
                   additionalProperties:

@@ -533,6 +539,8 @@ spec:
                       type: string
                     idColumn:
                       type: string
+                    ignoreRecovery:
+                      type: boolean
                     payloadColumn:
                       type: string
                     recoveryEventType:
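For reference (not part of this change): the three fields added above combine in a cluster manifest's stream section roughly as in the sketch below. The table name and values are illustrative placeholders; the comments restate the CRD patterns from this hunk.

```yaml
streams:
  - applicationId: test-app
    database: foo
    batchSize: 100
    cpu: 100m       # must match '^(\d+m|\d+(\.\d{1,3})?)$'
    memory: 200Mi   # must match '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
    tables:
      data.outbox_table:        # hypothetical outbox table
        eventType: test-event
        idColumn: id
        payloadColumn: payload
        ignoreRecovery: true    # new boolean field from this hunk
```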
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v2
+    appVersion: 1.14.0
+    created: "2024-12-23T11:25:32.596716566+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.14.0.tgz
+    version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2024-08-21T18:54:43.160735116+02:00"
+    created: "2024-12-23T11:25:32.591136261+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef

@@ -25,7 +47,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-08-21T18:54:43.152249286+02:00"
+    created: "2024-12-23T11:25:32.585419709+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8

@@ -47,7 +69,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-08-21T18:54:43.145837894+02:00"
+    created: "2024-12-23T11:25:32.580077286+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9

@@ -69,7 +91,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-08-21T18:54:43.139552116+02:00"
+    created: "2024-12-23T11:25:32.574641578+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c

@@ -91,7 +113,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2024-08-21T18:54:43.168490032+02:00"
+    created: "2024-12-23T11:25:32.604748814+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276

@@ -111,4 +133,4 @@ entries:
     urls:
     - postgres-operator-1.9.0.tgz
     version: 1.9.0
-generated: "2024-08-21T18:54:43.126871802+02:00"
+generated: "2024-12-23T11:25:32.568598763+01:00"

Binary file not shown.
@@ -141,7 +141,7 @@ rules:
   - get
   - list
   - patch
-{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
+{{- if or (toString .Values.configKubernetes.storage_resize_mode | eq "pvc") (toString .Values.configKubernetes.storage_resize_mode | eq "mixed") }}
   - update
 {{- end }}
 # to read existing PVs. Creation should be done via dynamic provisioning
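For reference (not part of this change): the widened condition grants the extra `update` verb on PVCs for both resize modes that involve the operator. A sketch of the corresponding chart value, assuming the `configKubernetes` block referenced by the template:

```yaml
configKubernetes:
  # "pvc" and, with this change, "mixed" both render the additional "update" verb
  storage_resize_mode: mixed
```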
@@ -54,7 +54,7 @@ spec:
             value: {{ template "postgres-operator.controllerID" . }}
           {{- end }}
           {{- if .Values.extraEnvs }}
-          {{- .Values.extraEnvs | toYaml | nindent 8 }}
+{{ toYaml .Values.extraEnvs | indent 8 }}
           {{- end }}
           resources:
 {{ toYaml .Values.resources | indent 10 }}
@@ -1,7 +1,7 @@
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator
-  tag: v1.13.0
+  tag: v1.14.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -38,7 +38,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: ghcr.io/zalando/spilo-16:3.3-p1
+  docker_image: ghcr.io/zalando/spilo-17:4.0-p2

   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""
@@ -89,9 +89,9 @@ configMajorVersionUpgrade:
   # - acid

   # minimal Postgres major version that will not automatically be upgraded
-  minimal_major_version: "12"
+  minimal_major_version: "13"
   # target Postgres major version when upgrading clusters automatically
-  target_major_version: "16"
+  target_major_version: "17"

 configKubernetes:
   # list of additional capabilities for postgres container
@@ -35,6 +35,8 @@ func init() {
 	flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.")
 	flag.BoolVar(&config.NoDatabaseAccess, "nodatabaseaccess", false, "Disable all access to the database from the operator side.")
 	flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
+	flag.IntVar(&config.KubeQPS, "kubeqps", 10, "Kubernetes api requests per second.")
+	flag.IntVar(&config.KubeBurst, "kubeburst", 20, "Kubernetes api requests burst limit.")
 	flag.Parse()

 	config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
@@ -83,6 +85,9 @@ func main() {
 		log.Fatalf("couldn't get REST config: %v", err)
 	}

+	config.RestConfig.QPS = float32(config.KubeQPS)
+	config.RestConfig.Burst = config.KubeBurst
+
 	c := controller.NewController(&config, "")

 	c.Run(stop, wg)
@@ -1,4 +1,4 @@
-FROM golang:1.22-alpine
+FROM golang:1.23-alpine
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 # We need root certificates to deal with teams api over https
@@ -1,5 +1,5 @@
 ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
-FROM golang:1.22-alpine AS builder
+FROM golang:1.23-alpine AS builder
 ARG VERSION=latest

 COPY . /go/src/github.com/zalando/postgres-operator
@@ -13,7 +13,7 @@ apt-get install -y wget

 (
   cd /tmp
-  wget -q "https://storage.googleapis.com/golang/go1.22.5.linux-${arch}.tar.gz" -O go.tar.gz
+  wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz
   tar -xf go.tar.gz
   mv go /usr/local
   ln -s /usr/local/go/bin/go /usr/bin/go
@@ -63,14 +63,17 @@ the `PGVERSION` environment variable is set for the database pods. Since
 `v1.6.0` the related option `enable_pgversion_env_var` is enabled by default.

 In-place major version upgrades can be configured to be executed by the
-operator with the `major_version_upgrade_mode` option. By default it is set
-to `off` which means the cluster version will not change when increased in
-the manifest. Still, a rolling update would be triggered updating the
-`PGVERSION` variable. But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
-script will notice the version mismatch and start the old version again.
+operator with the `major_version_upgrade_mode` option. By default, it is
+enabled (mode: `manual`). In any case, altering the version in the manifest
+will trigger a rolling update of pods to update the `PGVERSION` env variable.
+Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
+script will notice the version mismatch but start the current version again.

-In this scenario the major version could then be run by a user from within the
-primary pod. Exec into the container and run:
+Next, the operator would call an upgrade script inside Spilo. When automatic
+upgrades are disabled (mode: `off`) the upgrade could still be run by a user
+from within the primary pod. This gives you full control about the point in
+time when the upgrade can be started (check also maintenance windows below).
+Exec into the container and run:
 ```bash
 python3 /scripts/inplace_upgrade.py N
 ```
@@ -79,17 +82,32 @@ The upgrade is usually fast, well under one minute for most DBs. Note, that
 changes become irrevertible once `pg_upgrade` is called. To understand the
 upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).

-When `major_version_upgrade_mode` is set to `manual` the operator will run
-the upgrade script for you after the manifest is updated and pods are rotated.
-It is also possible to define `maintenanceWindows` in the Postgres manifest to
-better control when such automated upgrades should take place after increasing
-the version.
+When `major_version_upgrade_mode` is set to `full` the operator will compare
+the version in the manifest with the configured `minimal_major_version`. If it
+is lower the operator would start an automatic upgrade as described above. The
+configured `major_target_version` will be used as the new version. This option
+can be useful if you have to get rid of outdated major versions in your fleet.
+Please note, that the operator does not patch the version in the manifest.
+Thus, the `full` mode can create drift between desired and actual state.
+
+### Upgrade during maintenance windows
+
+When `maintenanceWindows` are defined in the Postgres manifest the operator
+will trigger a major version upgrade only during these periods. Make sure they
+are at least twice as long as your configured `resync_period` to guarantee
+that operator actions can be triggered.

 ### Upgrade annotations

-When an upgrade is executed, the operator sets an annotation in the PostgreSQL resource, either `last-major-upgrade-success` if the upgrade succeeds, or `last-major-upgrade-failure` if it fails. The value of the annotation is a timestamp indicating when the upgrade occurred.
+When an upgrade is executed, the operator sets an annotation in the PostgreSQL
+resource, either `last-major-upgrade-success` if the upgrade succeeds, or
+`last-major-upgrade-failure` if it fails. The value of the annotation is a
+timestamp indicating when the upgrade occurred.

-If a PostgreSQL resource contains a failure annotation, the operator will not attempt to retry the upgrade during a sync event. To remove the failure annotation, you can revert the PostgreSQL version back to the current version. This action will trigger the removal of the failure annotation.
+If a PostgreSQL resource contains a failure annotation, the operator will not
+attempt to retry the upgrade during a sync event. To remove the failure
+annotation, you can revert the PostgreSQL version back to the current version.
+This action will trigger the removal of the failure annotation.

 ## Non-default cluster domain
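For reference (not part of this change): a minimal sketch of a cluster manifest that pins the desired version and confines automated upgrades to a window; the window format mirrors the `%H:%M-%H:%M` strings used by the e2e tests later in this diff, and the names are illustrative.

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-upgrade-test   # illustrative name, borrowed from the e2e tests
spec:
  postgresql:
    version: "17"
  maintenanceWindows:       # upgrade actions run only inside these periods
  - "01:00-06:00"
```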
@@ -1279,7 +1297,7 @@ aws_or_gcp:

 If cluster members have to be (re)initialized restoring physical backups
 happens automatically either from the backup location or by running
-[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html)
+[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html)
 on one of the other running instances (preferably replicas if they do not lag
 behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
 clusters.
@@ -1387,6 +1405,10 @@ configuration:
         volumeMounts:
         - mountPath: /custom-pgdata-mountpoint
           name: pgdata
+        env:
+        - name: "ENV_VAR_NAME"
+          value: "any-k8s-env-things"
+        command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
       - ...
 ```
@@ -186,7 +186,7 @@ go get -u github.com/derekparker/delve/cmd/dlv

 ```
 RUN apk --no-cache add go git musl-dev
-RUN go get -d github.com/derekparker/delve/cmd/dlv
+RUN go get github.com/derekparker/delve/cmd/dlv
 ```

 * Update the `Makefile` to build the project with debugging symbols. For that

@@ -638,7 +638,7 @@ the global configuration before adding the `tls` section'.
 ## Change data capture streams

 This sections enables change data capture (CDC) streams via Postgres'
-[logical decoding](https://www.postgresql.org/docs/16/logicaldecoding.html)
+[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html)
 feature and `pgoutput` plugin. While the Postgres operator takes responsibility
 for providing the setup to publish change events, it relies on external tools
 to consume them. At Zalando, we are using a workflow based on
@@ -652,11 +652,11 @@ can have the following properties:

 * **applicationId**
   The application name to which the database and CDC belongs to. For each
-  set of streams with a distinct `applicationId` a separate stream CR as well
-  as a separate logical replication slot will be created. This means there can
-  be different streams in the same database and streams with the same
-  `applicationId` are bundled in one stream CR. The stream CR will be called
-  like the Postgres cluster plus "-<applicationId>" suffix. Required.
+  set of streams with a distinct `applicationId` a separate stream resource as
+  well as a separate logical replication slot will be created. This means there
+  can be different streams in the same database and streams with the same
+  `applicationId` are bundled in one stream resource. The stream resource will
+  be called like the Postgres cluster plus "-<applicationId>" suffix. Required.

 * **database**
   Name of the database from where events will be published via Postgres'
@@ -667,21 +667,37 @@ can have the following properties:

 * **tables**
   Defines a map of table names and their properties (`eventType`, `idColumn`
-  and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
+  and `payloadColumn`). Required.
+  The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
   The application is responsible for putting events into a (JSON/B or VARCHAR)
   payload column of the outbox table in the structure of the specified target
-  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/16/logical-replication-publication.html)
+  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html)
   in Postgres for all tables specified for one `database` and `applicationId`.
   The CDC operator will consume from it shortly after transactions are
   committed to the outbox table. The `idColumn` will be used in telemetry for
   the CDC operator. The names for `idColumn` and `payloadColumn` can be
   configured. Defaults are `id` and `payload`. The target `eventType` has to
-  be defined. Required.
+  be defined. One can also specify a `recoveryEventType` that will be used
+  for a dead letter queue. By enabling `ignoreRecovery`, you can choose to
+  ignore failing events.

 * **filter**
   Streamed events can be filtered by a jsonpath expression for each table.
   Optional.

+* **enableRecovery**
+  Flag to enable a dead letter queue recovery for all streams tables.
+  Alternatively, recovery can also be enabled for single outbox tables by only
+  specifying a `recoveryEventType` and no `enableRecovery` flag. When set to
+  false or missing, events will be retried until consuming succeeded. You can
+  use a `filter` expression to get rid of poison pills. Optional.
+
 * **batchSize**
   Defines the size of batches in which events are consumed. Optional.
   Defaults to 1.
+
+* **cpu**
+  CPU requests to be set as an annotation on the stream resource. Optional.
+
+* **memory**
+  memory requests to be set as an annotation on the stream resource. Optional.
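For reference (not part of this change): the documented properties assemble into a `streams` section roughly like the sketch below; the identifiers mirror the e2e test fixtures later in this diff and are placeholders.

```yaml
spec:
  streams:
  - applicationId: test-app   # one stream resource and slot per applicationId
    database: foo
    batchSize: 100
    cpu: 100m                 # set as annotation on the stream resource
    memory: 200Mi             # set as annotation on the stream resource
    enableRecovery: true      # dead letter queue for all tables below
    tables:
      data.outbox_table:
        eventType: test-event
        idColumn: id
        payloadColumn: payload
        recoveryEventType: test-event-dlq
```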
|
@ -94,9 +94,6 @@ Those are top-level keys, containing both leaf keys and groups.
|
||||||
* **enable_pgversion_env_var**
|
* **enable_pgversion_env_var**
|
||||||
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
|
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
|
||||||
|
|
||||||
* **enable_spilo_wal_path_compat**
|
|
||||||
enables backwards compatible path between Spilo 12 and Spilo 13+ images. The default is `false`.
|
|
||||||
|
|
||||||
* **enable_team_id_clustername_prefix**
|
* **enable_team_id_clustername_prefix**
|
||||||
To lower the risk of name clashes between clusters of different teams you
|
To lower the risk of name clashes between clusters of different teams you
|
||||||
can turn on this flag and the operator will sync only clusters where the
|
can turn on this flag and the operator will sync only clusters where the
|
||||||
|
|
@@ -250,12 +247,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.

 * **minimal_major_version**
   The minimal Postgres major version that will not automatically be upgraded
-  when `major_version_upgrade_mode` is set to `"full"`. The default is `"12"`.
+  when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`.

 * **target_major_version**
   The target Postgres major version when upgrading clusters automatically
   which violate the configured allowed `minimal_major_version` when
-  `major_version_upgrade_mode` is set to `"full"`. The default is `"16"`.
+  `major_version_upgrade_mode` is set to `"full"`. The default is `"17"`.

 ## Kubernetes resources
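For reference (not part of this change): the two defaults pair with `major_version_upgrade_mode` in the chart's `configMajorVersionUpgrade` block, shown earlier in this diff, roughly as:

```yaml
configMajorVersionUpgrade:
  major_version_upgrade_mode: "full"  # upgrade clusters below the minimal version
  minimal_major_version: "13"
  target_major_version: "17"
```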
docs/user.md (13 changed lines)
@@ -30,7 +30,7 @@ spec:
   databases:
     foo: zalando
   postgresql:
-    version: "16"
+    version: "17"
 ```

 Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@@ -109,7 +109,7 @@ metadata:
 spec:
   [...]
   postgresql:
-    version: "16"
+    version: "17"
     parameters:
       password_encryption: scram-sha-256
 ```
@@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:

 The `<dbname>_owner` role is the database owner and should be used when creating
 new database objects. All members of the `admin` role, e.g. teams API roles, can
-become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/16/sql-alterdefaultprivileges.html)
+become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html)
 are configured for the owner role so that the `<dbname>_reader` role
 automatically gets read-access (SELECT) to new tables and sequences and the
 `<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
@@ -594,7 +594,7 @@ spec:

 ### Schema `search_path` for default roles

-The schema [`search_path`](https://www.postgresql.org/docs/16/ddl-schemas.html#DDL-SCHEMAS-PATH)
+The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH)
 for each role will include the role name and the schemas, this role should have
 access to. So `foo_bar_writer` does not have to schema-qualify tables from
 schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and

@@ -695,7 +695,7 @@ handle it.

 ### HugePages support

-The operator supports [HugePages](https://www.postgresql.org/docs/16/kernel-resources.html#LINUX-HUGEPAGES).
+The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES).
 To enable HugePages, set the matching resource requests and/or limits in the manifest:

 ```yaml

@@ -838,7 +838,7 @@ spec:
 ### Clone directly

 Another way to get a fresh copy of your source DB cluster is via
-[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html). To
+[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To
 use this feature simply leave out the timestamp field from the clone section.
 The operator will connect to the service of the source cluster by name. If the
 cluster is called test, then the connection string will look like host=test

@@ -1005,6 +1005,7 @@ spec:
     env:
       - name: "ENV_VAR_NAME"
         value: "any-k8s-env-things"
+    command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
 ```

 In addition to any environment variables you specify, the following environment

@@ -46,7 +46,7 @@ tools:
 	# install pinned version of 'kind'
 	# go install must run outside of a dir with a (module-based) Go project !
 	# otherwise go install updates project's dependencies and/or behaves differently
-	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.23.0
+	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0

 e2etest: tools copy clean
 	./run.sh main

@@ -8,7 +8,7 @@ IFS=$'\n\t'

 readonly cluster_name="postgres-operator-e2e-tests"
 readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
-readonly spilo_image="registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
+readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
 readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"

 export GOPATH=${GOPATH-~/go}
@@ -1198,35 +1198,35 @@ class EndToEndTestCase(unittest.TestCase):
         k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-upgrade-test'

-        with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f:
+        with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'r+') as f:
             upgrade_manifest = yaml.safe_load(f)
             upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE

-        with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f:
+        with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'w') as f:
             yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper)

-        k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
+        k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml")
         self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running")
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        self.eventuallyEqual(check_version, 12, "Version is not correct")
+        self.eventuallyEqual(check_version, 13, "Version is not correct")

         master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label)
         # should upgrade immediately
-        pg_patch_version_13 = {
+        pg_patch_version_14 = {
             "spec": {
                 "postgresql": {
-                    "version": "13"
+                    "version": "14"
                 }
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_13)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 13, "Version should be upgraded from 12 to 13")
+        self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14")

         # check if annotation for last upgrade's success is set
         annotations = get_annotations()

@@ -1235,10 +1235,10 @@ class EndToEndTestCase(unittest.TestCase):
         # should not upgrade because current time is not in maintenanceWindow
         current_time = datetime.now()
         maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}"
-        pg_patch_version_14 = {
+        pg_patch_version_15 = {
             "spec": {
                 "postgresql": {
-                    "version": "14"
+                    "version": "15"
                 },
                 "maintenanceWindows": [
                     maintenance_window_future

@@ -1246,23 +1246,23 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 13, "Version should not be upgraded")
+        self.eventuallyEqual(check_version, 14, "Version should not be upgraded")

         second_annotations = get_annotations()
         self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set")

         # change the version again to trigger operator sync
         maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}"
-        pg_patch_version_15 = {
+        pg_patch_version_16 = {
             "spec": {
                 "postgresql": {
-                    "version": "15"
+                    "version": "16"
                 },
                 "maintenanceWindows": [
                     maintenance_window_current

@@ -1271,13 +1271,13 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }

         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 15, "Version should be upgraded from 13 to 15")
+        self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16")

         # check if annotation for last upgrade's success is updated after second upgrade
         third_annotations = get_annotations()

@@ -1285,7 +1285,7 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated")

         # test upgrade with failed upgrade annotation
-        pg_patch_version_16 = {
+        pg_patch_version_17 = {
             "metadata": {
                 "annotations": {
                     "last-major-upgrade-failure": "2024-01-02T15:04:05Z"

@@ -1293,18 +1293,18 @@ class EndToEndTestCase(unittest.TestCase):
             },
             "spec": {
                 "postgresql": {
-                    "version": "16"
+                    "version": "17"
                 },
             },
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set")
+        self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set")

         # change the version back to 15 and should remove failure annotation
         k8s.api.custom_objects_api.patch_namespaced_custom_object(

@@ -2201,6 +2201,8 @@ class EndToEndTestCase(unittest.TestCase):
             {
                 "applicationId": "test-app",
                 "batchSize": 100,
+                "cpu": "100m",
+                "memory": "200Mi",
                 "database": "foo",
                 "enableRecovery": True,
                 "tables": {

@@ -2222,7 +2224,7 @@ class EndToEndTestCase(unittest.TestCase):
                     "eventType": "test-event",
                     "idColumn": "id",
                     "payloadColumn": "payload",
-                    "recoveryEventType": "test-event-dlq"
+                    "ignoreRecovery": True
                 }
             }
         }
go.mod (12 lines changed)
@@ -1,6 +1,6 @@
 module github.com/zalando/postgres-operator

-go 1.22.0
+go 1.23.4

 require (
 	github.com/aws/aws-sdk-go v1.53.8
@@ -11,7 +11,7 @@ require (
 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.9.0
-	golang.org/x/crypto v0.26.0
+	golang.org/x/crypto v0.31.0
 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.30.4
@@ -54,10 +54,10 @@ require (
 	golang.org/x/mod v0.17.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/oauth2 v0.10.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.23.0 // indirect
-	golang.org/x/term v0.23.0 // indirect
-	golang.org/x/text v0.17.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
 	google.golang.org/appengine v1.6.7 // indirect

go.sum (20 lines changed)
@@ -119,8 +119,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -142,8 +142,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -151,16 +151,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
-golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
kubectl-pg/go.mod
@@ -1,6 +1,6 @@
 module github.com/zalando/postgres-operator/kubectl-pg

-go 1.22.0
+go 1.23.4

 require (
 	github.com/spf13/cobra v1.8.1
@@ -51,13 +51,13 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.26.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/oauth2 v0.18.0 // indirect
-	golang.org/x/sys v0.23.0 // indirect
-	golang.org/x/term v0.23.0 // indirect
-	golang.org/x/text v0.17.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect

kubectl-pg/go.sum
@@ -137,8 +137,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -166,18 +166,18 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
-golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -25,11 +25,11 @@ RUN apt-get update \
     && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
     && apt-get update \
     && apt-get install --no-install-recommends -y \
+        postgresql-client-17 \
         postgresql-client-16 \
         postgresql-client-15 \
         postgresql-client-14 \
         postgresql-client-13 \
-        postgresql-client-12 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
@@ -10,7 +10,7 @@ metadata:
     # "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date "key is configured
     # "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured
 spec:
-  dockerImage: ghcr.io/zalando/spilo-16:3.3-p1
+  dockerImage: ghcr.io/zalando/spilo-17:4.0-p2
   teamId: "acid"
   numberOfInstances: 2
   users:  # Application/Robot users
@@ -48,7 +48,7 @@ spec:
     defaultRoles: true
     defaultUsers: false
   postgresql:
-    version: "16"
+    version: "17"
     parameters:  # Expert section
       shared_buffers: "32MB"
      max_connections: "10"
@@ -34,7 +34,7 @@ data:
   default_memory_request: 100Mi
   # delete_annotation_date_key: delete-date
   # delete_annotation_name_key: delete-clustername
-  docker_image: ghcr.io/zalando/spilo-16:3.3-p1
+  docker_image: ghcr.io/zalando/spilo-17:4.0-p2
   # downscaler_annotations: "deployment-time,downscaler/*"
   enable_admin_role_for_users: "true"
   enable_crd_registration: "true"
@@ -86,7 +86,7 @@ data:
   # logical_backup_cpu_limit: ""
   # logical_backup_cpu_request: ""
   logical_backup_cronjob_environment_secret: ""
-  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
+  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
   # logical_backup_google_application_credentials: ""
   logical_backup_job_prefix: "logical-backup-"
   # logical_backup_memory_limit: ""
@@ -112,7 +112,7 @@ data:
   min_cpu_limit: 250m
   min_instances: "-1"
   min_memory_limit: 250Mi
-  minimal_major_version: "12"
+  minimal_major_version: "13"
   # node_readiness_label: "status:ready"
   # node_readiness_label_merge: "OR"
   oauth_token_secret_name: postgresql-operator
@@ -163,7 +163,7 @@ data:
   spilo_privileged: "false"
   storage_resize_mode: "pvc"
   super_username: postgres
-  target_major_version: "16"
+  target_major_version: "17"
   team_admin_role: "admin"
   team_api_role_configuration: "log_statement:all"
   teams_api_url: http://fake-teams-api.default.svc.cluster.local
@@ -31,11 +31,21 @@ spec:
     version: "13"
   sidecars:
     - name: "exporter"
-      image: "wrouesnel/postgres_exporter"
+      image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0"
       ports:
         - name: exporter
           containerPort: 9187
           protocol: TCP
+      env:
+        - name: DATA_SOURCE_URI
+          value: ":5432/?sslmode=disable"
+        - name: DATA_SOURCE_USER
+          value: "postgres"
+        - name: DATA_SOURCE_PASS
+          valueFrom:
+            secretKeyRef:
+              name: postgres.test-pg.credentials.postgresql.acid.zalan.do
+              key: password
       resources:
         limits:
           cpu: 500m
@@ -17,4 +17,4 @@ spec:
   preparedDatabases:
     bar: {}
   postgresql:
-    version: "12"
+    version: "13"

@@ -17,4 +17,4 @@ spec:
   preparedDatabases:
     bar: {}
   postgresql:
-    version: "16"
+    version: "17"
@@ -66,7 +66,7 @@ spec:
                 type: string
               docker_image:
                 type: string
-                default: "ghcr.io/zalando/spilo-16:3.3-p1"
+                default: "ghcr.io/zalando/spilo-17:4.0-p2"
               enable_crd_registration:
                 type: boolean
                 default: true
@@ -165,10 +165,10 @@ spec:
                 type: string
               minimal_major_version:
                 type: string
-                default: "12"
+                default: "13"
               target_major_version:
                 type: string
-                default: "16"
+                default: "17"
           kubernetes:
             type: object
             properties:
@@ -511,7 +511,7 @@ spec:
                 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
               logical_backup_docker_image:
                 type: string
-                default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
+                default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
               logical_backup_google_application_credentials:
                 type: string
               logical_backup_job_prefix:
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: postgres-operator
       containers:
         - name: postgres-operator
-          image: ghcr.io/zalando/postgres-operator:v1.13.0
+          image: ghcr.io/zalando/postgres-operator:v1.14.0
           imagePullPolicy: IfNotPresent
           resources:
             requests:
@@ -3,7 +3,7 @@ kind: OperatorConfiguration
 metadata:
   name: postgresql-operator-default-configuration
 configuration:
-  docker_image: ghcr.io/zalando/spilo-16:3.3-p1
+  docker_image: ghcr.io/zalando/spilo-17:4.0-p2
   # enable_crd_registration: true
   # crd_categories:
   #   - all
@@ -39,8 +39,8 @@ configuration:
   major_version_upgrade_mode: "manual"
   # major_version_upgrade_team_allow_list:
   #   - acid
-  minimal_major_version: "12"
-  target_major_version: "16"
+  minimal_major_version: "13"
+  target_major_version: "17"
   kubernetes:
     # additional_pod_capabilities:
     #   - "SYS_NICE"
@@ -169,7 +169,7 @@ configuration:
   # logical_backup_cpu_request: ""
   # logical_backup_memory_limit: ""
   # logical_backup_memory_request: ""
-  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
+  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
   # logical_backup_google_application_credentials: ""
   logical_backup_job_prefix: "logical-backup-"
   logical_backup_provider: "s3"
@@ -373,11 +373,11 @@ spec:
               version:
                 type: string
                 enum:
-                  - "12"
                   - "13"
                   - "14"
                   - "15"
                   - "16"
+                  - "17"
               parameters:
                 type: object
                 additionalProperties:
@@ -512,6 +512,9 @@ spec:
                   type: string
                 batchSize:
                   type: integer
+                cpu:
+                  type: string
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 database:
                   type: string
                 enableRecovery:
@@ -520,6 +523,9 @@ spec:
                   type: object
                   additionalProperties:
                     type: string
+                memory:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 tables:
                   type: object
                   additionalProperties:
@@ -531,6 +537,8 @@ spec:
                       type: string
                     idColumn:
                       type: string
+                    ignoreRecovery:
+                      type: boolean
                     payloadColumn:
                       type: string
                     recoveryEventType:
@@ -8,7 +8,7 @@ spec:
     size: 1Gi
   numberOfInstances: 1
   postgresql:
-    version: "16"
+    version: "17"
   # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
   standby:
     # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"
@@ -595,9 +595,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 					"version": {
 						Type: "string",
 						Enum: []apiextv1.JSON{
-							{
-								Raw: []byte(`"12"`),
-							},
 							{
 								Raw: []byte(`"13"`),
 							},
@@ -610,6 +607,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 							{
 								Raw: []byte(`"16"`),
 							},
+							{
+								Raw: []byte(`"17"`),
+							},
 						},
 					},
 					"parameters": {
@@ -1165,6 +1165,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 				},
 				"enable_spilo_wal_path_compat": {
 					Type: "boolean",
+					Description: "deprecated",
 				},
 				"enable_team_id_clustername_prefix": {
 					Type: "boolean",
@@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
 type MajorVersionUpgradeConfiguration struct {
 	MajorVersionUpgradeMode          string   `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
 	MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
-	MinimalMajorVersion              string   `json:"minimal_major_version" default:"12"`
-	TargetMajorVersion               string   `json:"target_major_version" default:"16"`
+	MinimalMajorVersion              string   `json:"minimal_major_version" default:"13"`
+	TargetMajorVersion               string   `json:"target_major_version" default:"17"`
 }

 // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
@@ -220,6 +220,7 @@ type Sidecar struct {
 	DockerImage string             `json:"image,omitempty"`
 	Ports       []v1.ContainerPort `json:"ports,omitempty"`
 	Env         []v1.EnvVar        `json:"env,omitempty"`
+	Command     []string           `json:"command,omitempty"`
 }

 // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
@@ -258,6 +259,8 @@ type Stream struct {
 	Tables         map[string]StreamTable `json:"tables"`
 	Filter         map[string]*string     `json:"filter,omitempty"`
 	BatchSize      *uint32                `json:"batchSize,omitempty"`
+	CPU            *string                `json:"cpu,omitempty"`
+	Memory         *string                `json:"memory,omitempty"`
 	EnableRecovery *bool                  `json:"enableRecovery,omitempty"`
 }

@@ -265,6 +268,7 @@ type Stream struct {
 type StreamTable struct {
 	EventType         string  `json:"eventType"`
 	RecoveryEventType string  `json:"recoveryEventType,omitempty"`
+	IgnoreRecovery    *bool   `json:"ignoreRecovery,omitempty"`
 	IdColumn          *string `json:"idColumn,omitempty"`
 	PayloadColumn     *string `json:"payloadColumn,omitempty"`
 }
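For context on how the new fields compose, here is a minimal Go sketch of a Stream spec using them, matching the `cpu`, `memory`, and `ignoreRecovery` manifest keys added to the CRD above. The generic `ptr` helper is hypothetical and only exists to take addresses of literals; field names come from the diff.

package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

// ptr is a small hypothetical helper to take the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	stream := acidv1.Stream{
		ApplicationId: "test-app",
		Database:      "foo",
		BatchSize:     ptr(uint32(100)),
		CPU:           ptr("100m"),  // new optional per-stream CPU request
		Memory:        ptr("200Mi"), // new optional per-stream memory request
		Tables: map[string]acidv1.StreamTable{
			"data.test_table": {
				EventType:      "test-event",
				IdColumn:       ptr("id"),
				PayloadColumn:  ptr("payload"),
				IgnoreRecovery: ptr(true), // new: skip dead-letter recovery for this table
			},
		},
	}
	fmt.Printf("%+v\n", stream)
}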
@@ -219,7 +219,7 @@ var unmarshalCluster = []struct {
 	      "127.0.0.1/32"
 	    ],
 	    "postgresql": {
-	      "version": "16",
+	      "version": "17",
 	      "parameters": {
 	        "shared_buffers": "32MB",
 	        "max_connections": "10",
@@ -279,7 +279,7 @@ var unmarshalCluster = []struct {
 		},
 		Spec: PostgresSpec{
 			PostgresqlParam: PostgresqlParam{
-				PgVersion: "16",
+				PgVersion: "17",
 				Parameters: map[string]string{
 					"shared_buffers":  "32MB",
 					"max_connections": "10",
@@ -339,7 +339,7 @@ var unmarshalCluster = []struct {
 			},
 			Error: "",
 		},
-		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"16","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
+		marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
 		err: nil},
 	{
 		about: "example with clone",
@@ -404,7 +404,7 @@ var postgresqlList = []struct {
 	out PostgresqlList
 	err error
 }{
-	{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"16"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
+	{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
 		PostgresqlList{
 			TypeMeta: metav1.TypeMeta{
 				Kind: "List",
@@ -425,7 +425,7 @@ var postgresqlList = []struct {
 			},
 			Spec: PostgresSpec{
 				ClusterName: "testcluster42",
-				PostgresqlParam: PostgresqlParam{PgVersion: "16"},
+				PostgresqlParam: PostgresqlParam{PgVersion: "17"},
 				Volume: Volume{Size: "10Gi"},
 				TeamID: "acid",
 				AllowedSourceRanges: []string{"185.85.220.0/22"},
@@ -1277,6 +1277,11 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }

@@ -1336,6 +1341,16 @@ func (in *Stream) DeepCopyInto(out *Stream) {
 		*out = new(uint32)
 		**out = **in
 	}
+	if in.CPU != nil {
+		in, out := &in.CPU, &out.CPU
+		*out = new(string)
+		**out = **in
+	}
+	if in.Memory != nil {
+		in, out := &in.Memory, &out.Memory
+		*out = new(string)
+		**out = **in
+	}
 	if in.EnableRecovery != nil {
 		in, out := &in.EnableRecovery, &out.EnableRecovery
 		*out = new(bool)
@@ -1357,6 +1372,11 @@ func (in *Stream) DeepCopy() *Stream {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *StreamTable) DeepCopyInto(out *StreamTable) {
 	*out = *in
+	if in.IgnoreRecovery != nil {
+		in, out := &in.IgnoreRecovery, &out.IgnoreRecovery
+		*out = new(bool)
+		**out = **in
+	}
 	if in.IdColumn != nil {
 		in, out := &in.IdColumn, &out.IdColumn
 		*out = new(string)
@@ -1230,6 +1230,7 @@ func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.Resour
 		Resources: *resources,
 		Env:       sidecar.Env,
 		Ports:     sidecar.Ports,
+		Command:   sidecar.Command,
 	}
 }
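With the new `Command` field, a sidecar declared in the cluster manifest can override its image entrypoint. A minimal sketch of how such a spec flows into the generated container (the sidecar name and image below are made up; the field copying mirrors the one-line change above):

package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	// A sidecar that overrides its image entrypoint (names are hypothetical).
	sidecar := acidv1.Sidecar{
		Name:        "log-shipper",
		DockerImage: "example.org/log-shipper:latest",
		Command:     []string{"/bin/sh", "-c", "exec shipper --target stdout"},
	}

	// getSidecarContainer now copies the command through unchanged,
	// so the pod's container runs the custom command.
	container := v1.Container{
		Name:    sidecar.Name,
		Image:   sidecar.DockerImage,
		Env:     sidecar.Env,
		Ports:   sidecar.Ports,
		Command: sidecar.Command,
	}
	fmt.Printf("%+v\n", container)
}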
@@ -72,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
 	}{
 		{
 			subtest: "Patroni default configuration",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
 			patroni: &acidv1.Patroni{},
 			opConfig: &config.Config{
 				Auth: config.Auth{
 					PamRoleName: "zalandos",
 				},
 			},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
 		},
 		{
 			subtest: "Patroni configured",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
 			patroni: &acidv1.Patroni{
 				InitDB: map[string]string{
 					"encoding": "UTF8",
@@ -102,38 +102,38 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
 				FailsafeMode: util.True(),
 			},
 			opConfig: &config.Config{},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
 		},
 		{
 			subtest: "Patroni failsafe_mode configured globally",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
 			patroni: &acidv1.Patroni{},
 			opConfig: &config.Config{
 				EnablePatroniFailsafeMode: util.True(),
 			},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
 		},
 		{
 			subtest: "Patroni failsafe_mode configured globally, disabled for cluster",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
 			patroni: &acidv1.Patroni{
 				FailsafeMode: util.False(),
 			},
 			opConfig: &config.Config{
 				EnablePatroniFailsafeMode: util.True(),
 			},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
 		},
 		{
 			subtest: "Patroni failsafe_mode disabled globally, configured for cluster",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
 			patroni: &acidv1.Patroni{
 				FailsafeMode: util.True(),
 			},
 			opConfig: &config.Config{
 				EnablePatroniFailsafeMode: util.False(),
 			},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
 		},
 	}
 	for _, tt := range tests {
@@ -164,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
 		},
 		{
 			subTest: "test current bin path against hard coded template",
-			binPath: "/usr/lib/postgresql/16/bin",
+			binPath: "/usr/lib/postgresql/17/bin",
 			template: pgBinariesLocationTemplate,
-			expected: "16",
+			expected: "17",
 		},
 		{
 			subTest: "test alternative bin path against a matching template",
-			binPath: "/usr/pgsql-16/bin",
+			binPath: "/usr/pgsql-17/bin",
 			template: "/usr/pgsql-%v/bin",
-			expected: "16",
+			expected: "17",
 		},
 	}

@@ -2148,7 +2148,7 @@ func TestSidecars(t *testing.T) {

 	spec = acidv1.PostgresSpec{
 		PostgresqlParam: acidv1.PostgresqlParam{
-			PgVersion: "16",
+			PgVersion: "17",
 			Parameters: map[string]string{
 				"max_connections": "100",
 			},
 		},
@@ -21,6 +21,7 @@ var VersionMap = map[string]int{
 	"14": 140000,
 	"15": 150000,
 	"16": 160000,
+	"17": 170000,
 }

 const (
@@ -44,7 +45,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int {
 func (c *Cluster) GetDesiredMajorVersion() string {

 	if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
-		// e.g. current is 12, minimal is 12 allowing 12 to 16 clusters, everything below is upgraded
+		// e.g. current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded
 		if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
 			c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
 			return c.Config.OpConfig.TargetMajorVersion
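Mapping each major version string to a numeric value is what keeps version comparisons trivial. A self-contained sketch of the idea; the map copies VersionMap from the hunk above, while the comparison helper is a hypothetical stand-in for the operator's IsBiggerPostgresVersion, not its actual implementation:

package main

import "fmt"

// versionMap mirrors VersionMap from the diff above.
var versionMap = map[string]int{
	"13": 130000,
	"14": 140000,
	"15": 150000,
	"16": 160000,
	"17": 170000,
}

// isBiggerPostgresVersion is a hypothetical stand-in: it reports
// whether major version a is newer than major version b.
func isBiggerPostgresVersion(a, b string) bool {
	return versionMap[a] > versionMap[b]
}

func main() {
	fmt.Println(isBiggerPostgresVersion("17", "13")) // true
	fmt.Println(isBiggerPostgresVersion("13", "13")) // false
}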
@@ -178,16 +178,25 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za

 func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream {
 	eventStreams := make([]zalandov1.EventStream, 0)
+	resourceAnnotations := map[string]string{}
+	var err, err2 error

 	for _, stream := range c.Spec.Streams {
 		if stream.ApplicationId != appId {
 			continue
 		}

+		err = setResourceAnnotation(&resourceAnnotations, stream.CPU, constants.EventStreamCpuAnnotationKey)
+		err2 = setResourceAnnotation(&resourceAnnotations, stream.Memory, constants.EventStreamMemoryAnnotationKey)
+		if err != nil || err2 != nil {
+			c.logger.Warningf("could not set resource annotation for event stream: %v", err)
+		}
+
 		for tableName, table := range stream.Tables {
 			streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn)
 			streamFlow := getEventStreamFlow(table.PayloadColumn)
 			streamSink := getEventStreamSink(stream, table.EventType)
-			streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType)
+			streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType, table.IgnoreRecovery)

 			eventStreams = append(eventStreams, zalandov1.EventStream{
 				EventStreamFlow: streamFlow,
@@ -207,7 +216,7 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
 			Name:            fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))),
 			Namespace:       c.Namespace,
 			Labels:          c.labelsSet(true),
-			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(nil)),
+			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(resourceAnnotations)),
 			OwnerReferences: c.ownerReferences(),
 		},
 		Spec: zalandov1.FabricEventStreamSpec{
@@ -217,6 +226,27 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
 	}
 }

+func setResourceAnnotation(annotations *map[string]string, resource *string, key string) error {
+	var (
+		isSmaller bool
+		err       error
+	)
+	if resource != nil {
+		currentValue, exists := (*annotations)[key]
+		if exists {
+			isSmaller, err = util.IsSmallerQuantity(currentValue, *resource)
+			if err != nil {
+				return fmt.Errorf("could not compare resource in %q annotation: %v", key, err)
+			}
+		}
+		if isSmaller || !exists {
+			(*annotations)[key] = *resource
+		}
+	}
+
+	return nil
+}
+
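Across several streams with the same applicationId, setResourceAnnotation keeps the largest requested quantity per annotation key: it writes only when no value exists yet or the stored value compares as smaller. A test-style usage sketch; this is hypothetical code assumed to live next to streams.go (with "testing" imported), and the annotation key value is an assumption, since the diff only shows the constant name:

// Hypothetical test illustrating the keep-the-maximum behavior.
func TestSetResourceAnnotationKeepsMaximum(t *testing.T) {
	annotations := map[string]string{}
	small, big := "100m", "250m"
	key := "fes.zalando.org/FES_CPU" // assumed value of EventStreamCpuAnnotationKey

	_ = setResourceAnnotation(&annotations, &small, key) // first write wins
	_ = setResourceAnnotation(&annotations, &big, key)   // 250m replaces 100m
	_ = setResourceAnnotation(&annotations, &small, key) // 100m < 250m, kept

	if annotations[key] != "250m" {
		t.Errorf("expected 250m, got %s", annotations[key])
	}
}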
 func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource {
 	table, schema := getTableSchema(tableName)
 	streamFilter := stream.Filter[tableName]

@@ -247,7 +277,7 @@ func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1.EventS
 	}
 }

-func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery {
+func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string, ignoreRecovery *bool) zalandov1.EventStreamRecovery {
 	if (stream.EnableRecovery != nil && !*stream.EnableRecovery) ||
 		(stream.EnableRecovery == nil && recoveryEventType == "") {
 		return zalandov1.EventStreamRecovery{
@@ -255,6 +285,12 @@ func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType s
 		}
 	}

+	if ignoreRecovery != nil && *ignoreRecovery {
+		return zalandov1.EventStreamRecovery{
+			Type: constants.EventStreamRecoveryIgnoreType,
+		}
+	}
+
 	if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" {
 		recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix)
 	}
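The recovery type is now decided in order: disabled recovery first, then the new per-table ignore flag, then fall-through to dead-letter-queue recovery with a derived event type. A compact sketch of that precedence with simplified types; the returned string values are assumptions standing in for the actual constants, which the diff names but does not define:

// Sketch of the decision order in getEventStreamRecovery (simplified types).
func recoveryType(enableRecovery, ignoreRecovery *bool, recoveryEventType string) string {
	if (enableRecovery != nil && !*enableRecovery) ||
		(enableRecovery == nil && recoveryEventType == "") {
		return "None" // assumed value of the "no recovery" type constant
	}
	if ignoreRecovery != nil && *ignoreRecovery {
		return "Ignore" // assumed value of constants.EventStreamRecoveryIgnoreType
	}
	return "DLQ" // fall through to dead-letter-queue recovery
}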
@ -442,7 +478,9 @@ func (c *Cluster) syncStream(appId string) error {
|
||||||
c.setProcessName("syncing stream with applicationId %s", appId)
|
c.setProcessName("syncing stream with applicationId %s", appId)
|
||||||
c.logger.Debugf("syncing stream with applicationId %s", appId)
|
c.logger.Debugf("syncing stream with applicationId %s", appId)
|
||||||
|
|
||||||
listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()}
|
listOptions := metav1.ListOptions{
|
||||||
|
LabelSelector: c.labelsSet(false).String(),
|
||||||
|
}
|
||||||
streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
|
streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err)
|
return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err)
|
||||||
|
|
@ -453,15 +491,6 @@ func (c *Cluster) syncStream(appId string) error {
|
||||||
if stream.Spec.ApplicationId != appId {
|
if stream.Spec.ApplicationId != appId {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if streamExists {
|
|
||||||
c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId)
|
|
||||||
if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
|
|
||||||
c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err)
|
|
||||||
} else {
|
|
||||||
c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
streamExists = true
|
streamExists = true
|
||||||
desiredStreams := c.generateFabricEventStream(appId)
|
desiredStreams := c.generateFabricEventStream(appId)
|
||||||
if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
|
if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
|
||||||
|
|
@ -476,7 +505,8 @@ func (c *Cluster) syncStream(appId string) error {
|
||||||
}
|
}
|
||||||
if match, reason := c.compareStreams(&stream, desiredStreams); !match {
|
if match, reason := c.compareStreams(&stream, desiredStreams); !match {
|
||||||
c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
|
c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
|
||||||
desiredStreams.ObjectMeta = stream.ObjectMeta
|
// make sure to keep the old name with randomly generated suffix
|
||||||
|
desiredStreams.ObjectMeta.Name = stream.ObjectMeta.Name
|
||||||
updatedStream, err := c.updateStreams(desiredStreams)
|
updatedStream, err := c.updateStreams(desiredStreams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err)
|
return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err)
|
||||||
|
|
@ -484,6 +514,7 @@ func (c *Cluster) syncStream(appId string) error {
|
||||||
c.Streams[appId] = updatedStream
|
c.Streams[appId] = updatedStream
|
||||||
c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
|
c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
|
||||||
}
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if !streamExists {
|
if !streamExists {
|
||||||
|
|
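syncStream now lists FabricEventStreams with labelsSet(false) instead of labelsSet(true); judging from the call sites, the boolean toggles which extra labels are appended to the base cluster labels, but the helper is outside this diff, so treat that as an assumption. For reference, a selector string like the one used here is just the rendered form of a labels.Set:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// labels.Set renders as "k1=v1,k2=v2" with keys sorted.
	selector := labels.Set{
		"application":  "spilo",
		"cluster-name": "acid-test-cluster", // hypothetical cluster name
	}.String()
	listOptions := metav1.ListOptions{LabelSelector: selector}
	fmt.Println(listOptions.LabelSelector) // application=spilo,cluster-name=acid-test-cluster
}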
@@ -501,15 +532,29 @@ func (c *Cluster) syncStream(appId string) error {

 func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) {
 	reasons := make([]string, 0)
+	desiredAnnotations := make(map[string]string)
 	match = true

 	// stream operator can add extra annotations so incl. current annotations in desired annotations
-	desiredAnnotations := c.annotationsSet(curEventStreams.Annotations)
+	for curKey, curValue := range curEventStreams.Annotations {
+		if _, exists := desiredAnnotations[curKey]; !exists {
+			desiredAnnotations[curKey] = curValue
+		}
+	}
+	// add/or override annotations if cpu and memory values were changed
+	for newKey, newValue := range newEventStreams.Annotations {
+		desiredAnnotations[newKey] = newValue
+	}
 	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed {
 		match = false
 		reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason))
 	}

+	if !reflect.DeepEqual(curEventStreams.ObjectMeta.Labels, newEventStreams.ObjectMeta.Labels) {
+		match = false
+		reasons = append(reasons, "new streams labels do not match the current ones")
+	}
+
 	if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed {
 		match = false
 		reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason))
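The net effect of the two loops above is a last-writer-wins merge: everything already present on the live object is preserved (the stream operator may add its own annotations), and the operator's desired annotations, such as the new FES_CPU/FES_MEMORY keys, override on conflict. A self-contained sketch of that merge:

// mergeAnnotations reproduces the two-pass merge above: keep all current
// annotations, then let the desired ones win on conflicting keys.
func mergeAnnotations(current, desired map[string]string) map[string]string {
	merged := make(map[string]string, len(current)+len(desired))
	for k, v := range current {
		merged[k] = v
	}
	for k, v := range desired {
		merged[k] = v // desired values override current ones on conflict
	}
	return merged
}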
@@ -65,12 +65,17 @@ var (
 					EventType:         "stream-type-b",
 					RecoveryEventType: "stream-type-b-dlq",
 				},
+				"data.foofoobar": {
+					EventType:      "stream-type-c",
+					IgnoreRecovery: util.True(),
+				},
 			},
 			EnableRecovery: util.True(),
 			Filter: map[string]*string{
 				"data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
 			},
 			BatchSize: k8sutil.UInt32ToPointer(uint32(100)),
+			CPU:       k8sutil.StringToPointer("250m"),
 		},
 	},
 	TeamID: "acid",

@@ -88,9 +93,12 @@ var (
 	ObjectMeta: metav1.ObjectMeta{
 		Name:      fmt.Sprintf("%s-12345", clusterName),
 		Namespace: namespace,
+		Annotations: map[string]string{
+			constants.EventStreamCpuAnnotationKey: "250m",
+		},
 		Labels: map[string]string{
 			"application":  "spilo",
-			"cluster-name": fmt.Sprintf("%s-2", clusterName),
+			"cluster-name": clusterName,
 			"team":         "acid",
 		},
 		OwnerReferences: []metav1.OwnerReference{

@@ -180,6 +188,37 @@ var (
 				Type: constants.EventStreamSourcePGType,
 			},
 		},
+		{
+			EventStreamFlow: zalandov1.EventStreamFlow{
+				Type: constants.EventStreamFlowPgGenericType,
+			},
+			EventStreamRecovery: zalandov1.EventStreamRecovery{
+				Type: constants.EventStreamRecoveryIgnoreType,
+			},
+			EventStreamSink: zalandov1.EventStreamSink{
+				EventType:    "stream-type-c",
+				MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
+				Type:         constants.EventStreamSinkNakadiType,
+			},
+			EventStreamSource: zalandov1.EventStreamSource{
+				Connection: zalandov1.Connection{
+					DBAuth: zalandov1.DBAuth{
+						Name:        fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName),
+						PasswordKey: "password",
+						Type:        constants.EventStreamSourceAuthType,
+						UserKey:     "username",
+					},
+					Url:        fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser),
+					SlotName:   slotName,
+					PluginType: constants.EventStreamSourcePluginType,
+				},
+				Schema: "data",
+				EventStreamTable: zalandov1.EventStreamTable{
+					Name: "foofoobar",
+				},
+				Type: constants.EventStreamSourcePGType,
+			},
+		},
 	},
 	},
 }
@@ -449,7 +488,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
 	}

 	listOptions := metav1.ListOptions{
-		LabelSelector: cluster.labelsSet(true).String(),
+		LabelSelector: cluster.labelsSet(false).String(),
 	}
 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)

@@ -488,7 +527,8 @@ func newFabricEventStream(streams []zalandov1.EventStream, annotations map[strin
 }

 func TestSyncStreams(t *testing.T) {
-	pg.Name = fmt.Sprintf("%s-2", pg.Name)
+	newClusterName := fmt.Sprintf("%s-2", pg.Name)
+	pg.Name = newClusterName
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -500,7 +540,6 @@ func TestSyncStreams(t *testing.T) {
 				DefaultCPULimit:      "300m",
 				DefaultMemoryRequest: "300Mi",
 				DefaultMemoryLimit:   "300Mi",
-				EnableOwnerReferences: util.True(),
 				PodRoleLabel:         "spilo-role",
 			},
 		},

@@ -514,39 +553,23 @@ func TestSyncStreams(t *testing.T) {
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

-	// create a second stream with same spec but with different name
+	// sync the stream again
-	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
-		context.TODO(), fes, metav1.CreateOptions{})
-	assert.NoError(t, err)
-	assert.Equal(t, createdStream.Spec.ApplicationId, appId)
-
-	// check that two streams exist
-	listOptions := metav1.ListOptions{
-		LabelSelector: cluster.labelsSet(true).String(),
-	}
-	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
-	assert.NoError(t, err)
-	assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items))
-
-	// sync the stream which should remove the redundant stream
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

 	// check that only one stream remains after sync
-	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(false).String(),
+	}
+	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
 	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
-
-	// check owner references
-	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
-		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
-	}
 }
 func TestSameStreams(t *testing.T) {
 	testName := "TestSameStreams"
-	annotationsA := map[string]string{"owned-by": "acid"}
+	annotationsA := map[string]string{constants.EventStreamMemoryAnnotationKey: "500Mi"}
-	annotationsB := map[string]string{"owned-by": "foo"}
+	annotationsB := map[string]string{constants.EventStreamMemoryAnnotationKey: "1Gi"}

 	stream1 := zalandov1.EventStream{
 		EventStreamFlow: zalandov1.EventStreamFlow{},

@@ -615,42 +638,49 @@ func TestSameStreams(t *testing.T) {
 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
 			match:    false,
-			reason:   "number of defined streams is different",
+			reason:   "new streams EventStreams array does not match : number of defined streams is different",
 		},
 		{
 			subTest:  "different number of streams",
 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
 			match:    false,
-			reason:   "number of defined streams is different",
+			reason:   "new streams EventStreams array does not match : number of defined streams is different",
 		},
 		{
 			subTest:  "event stream specs differ",
 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
 			streamsB: fes,
 			match:    false,
-			reason:   "number of defined streams is different",
+			reason:   "new streams annotations do not match: Added \"fes.zalando.org/FES_CPU\" with value \"250m\"., new streams labels do not match the current ones, new streams EventStreams array does not match : number of defined streams is different",
 		},
 		{
 			subTest:  "event stream recovery specs differ",
 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil),
 			match:    false,
-			reason:   "event stream specs differ",
+			reason:   "new streams EventStreams array does not match : event stream specs differ",
+		},
+		{
+			subTest:  "event stream with new annotations",
+			streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
+			streamsB: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
+			match:    false,
+			reason:   "new streams annotations do not match: Added \"fes.zalando.org/FES_MEMORY\" with value \"500Mi\".",
 		},
 		{
 			subTest:  "event stream annotations differ",
-			streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
+			streamsA: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsA),
 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB),
 			match:    false,
-			reason:   "event stream specs differ",
+			reason:   "new streams annotations do not match: \"fes.zalando.org/FES_MEMORY\" changed from \"500Mi\" to \"1Gi\".",
 		},
 	}

 	for _, tt := range tests {
 		streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB)
-		if streamsMatch != tt.match {
+		if streamsMatch != tt.match || matchReason != tt.reason {
-			t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s",
+			t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s",
 				testName, tt.subTest, matchReason, tt.reason)
 		}
 	}
@@ -658,6 +688,105 @@ func TestSameStreams(t *testing.T) {

 func TestUpdateStreams(t *testing.T) {
 	pg.Name = fmt.Sprintf("%s-3", pg.Name)
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					EnableOwnerReferences: util.True(),
+					PodRoleLabel:          "spilo-role",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	_, err := cluster.KubeClient.Postgresqls(namespace).Create(
+		context.TODO(), &pg, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	// create stream with different owner reference
+	fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
+	fes.ObjectMeta.Labels["cluster-name"] = pg.Name
+	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
+		context.TODO(), fes, metav1.CreateOptions{})
+	assert.NoError(t, err)
+	assert.Equal(t, createdStream.Spec.ApplicationId, appId)
+
+	// sync the stream which should update the owner reference
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	// check that only one stream exists after sync
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(true).String(),
+	}
+	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
+
+	// compare owner references
+	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
+		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
+	}
+
+	// change specs of streams and patch CRD
+	for i, stream := range pg.Spec.Streams {
+		if stream.ApplicationId == appId {
+			streamTable := stream.Tables["data.bar"]
+			streamTable.EventType = "stream-type-c"
+			stream.Tables["data.bar"] = streamTable
+			stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250))
+			pg.Spec.Streams[i] = stream
+		}
+	}
+
+	// compare stream returned from API with expected stream
+	streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
+	result := cluster.generateFabricEventStream(appId)
+	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
+		t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
+	}
+
+	// disable recovery
+	for idx, stream := range pg.Spec.Streams {
+		if stream.ApplicationId == appId {
+			stream.EnableRecovery = util.False()
+			pg.Spec.Streams[idx] = stream
+		}
+	}
+
+	streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
+	result = cluster.generateFabricEventStream(appId)
+	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
+		t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
+	}
+}
+
+func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
+	patchData, err := specPatch(pgSpec)
+	assert.NoError(t, err)
+
+	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
+		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
+	assert.NoError(t, err)
+
+	cluster.Postgresql.Spec = pgPatched.Spec
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+
+	return streams
+}
+
+func TestDeleteStreams(t *testing.T) {
+	pg.Name = fmt.Sprintf("%s-4", pg.Name)
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{
@@ -695,7 +824,7 @@ func TestUpdateStreams(t *testing.T) {

 	// compare stream returned from API with expected stream
 	listOptions := metav1.ListOptions{
-		LabelSelector: cluster.labelsSet(true).String(),
+		LabelSelector: cluster.labelsSet(false).String(),
 	}
 	streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
 	result := cluster.generateFabricEventStream(appId)

@@ -703,6 +832,14 @@ func TestUpdateStreams(t *testing.T) {
 		t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
 	}

+	// change teamId and check that stream is updated
+	pg.Spec.TeamID = "new-team"
+	streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
+	result = cluster.generateFabricEventStream(appId)
+	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
+		t.Errorf("Malformed FabricEventStream after updating teamId, expected %#v, got %#v", streams.Items[0].ObjectMeta.Labels, result.ObjectMeta.Labels)
+	}
+
 	// disable recovery
 	for idx, stream := range pg.Spec.Streams {
 		if stream.ApplicationId == appId {

@@ -717,9 +854,6 @@ func TestUpdateStreams(t *testing.T) {
 		t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
 	}

-	mockClient := k8sutil.NewMockKubernetesClient()
-	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
-
 	// remove streams from manifest
 	pg.Spec.Streams = nil
 	pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update(

@@ -729,26 +863,29 @@ func TestUpdateStreams(t *testing.T) {
 	appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams)
 	cluster.cleanupRemovedStreams(appIds)

+	// check that streams have been deleted
 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
-	if len(streams.Items) > 0 || err != nil {
+	assert.NoError(t, err)
-		t.Errorf("stream resource has not been removed or unexpected error %v", err)
+	assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
-	}
-}

-func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
+	// create stream to test deleteStreams code
-	patchData, err := specPatch(pgSpec)
+	fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
+	fes.ObjectMeta.Labels["cluster-name"] = pg.Name
+	_, err = cluster.KubeClient.FabricEventStreams(namespace).Create(
+		context.TODO(), fes, metav1.CreateOptions{})
 	assert.NoError(t, err)

-	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
+	// sync it once to cluster struct
-		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
-	assert.NoError(t, err)
-
-	cluster.Postgresql.Spec = pgPatched.Spec
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

+	// we need a mock client because deleteStreams checks for CRD existance
+	mockClient := k8sutil.NewMockKubernetesClient()
+	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
+	cluster.deleteStreams()
+
+	// check that streams have been deleted
 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
+	assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
-	return streams
 }
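The tests above drive spec changes through patchPostgresqlStreams, which merge-patches only the spec of the Postgresql object (types.MergePatchType). A sketch of what a helper like specPatch presumably produces; the helper's source is outside this diff, so the wrapping shape is an assumption:

package main

import (
	"encoding/json"
	"fmt"
)

// specPatch wraps a spec in {"spec": ...} so that a JSON merge patch
// only touches the spec of the patched object.
func specPatch(spec interface{}) ([]byte, error) {
	return json.Marshal(struct {
		Spec interface{} `json:"spec"`
	}{spec})
}

func main() {
	patch, _ := specPatch(map[string]interface{}{"streams": nil})
	fmt.Println(string(patch)) // {"spec":{"streams":null}}
}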
@@ -151,7 +151,7 @@ func (c *Cluster) populateVolumeMetaData() error {
 	volumeIds := []string{}
 	var volumeID string
 	for _, pv := range pvs {
-		volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		volumeID, err = c.VolumeResizer.GetProviderVolumeID(pv)
 		if err != nil {
 			continue
 		}

@@ -216,6 +216,12 @@ func TestMigrateEBS(t *testing.T) {
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)

+	resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
+		DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
+			return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		}).
+		Times(2)
+
 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 		[]volumes.VolumeProperties{
 			{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100},

@@ -322,6 +328,12 @@ func TestMigrateGp3Support(t *testing.T) {
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil)

+	resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
+		DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
+			return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		}).
+		Times(3)
+
 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return(
 		[]volumes.VolumeProperties{
 			{VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000},

@@ -377,6 +389,12 @@ func TestManualGp2Gp3Support(t *testing.T) {
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)

+	resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
+		DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
+			return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		}).
+		Times(2)
+
 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 		[]volumes.VolumeProperties{
 			{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},

@@ -436,6 +454,12 @@ func TestDontTouchType(t *testing.T) {
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)

+	resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
+		DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
+			return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		}).
+		Times(2)
+
 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 		[]volumes.VolumeProperties{
 			{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},
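All four volume tests register the same delegating expectation: the newly mocked GetProviderVolumeID forwards to the already-stubbed ExtractVolumeID, so each fixture volume still consumes exactly one per-volume expectation. This relies on gomock's DoAndReturn, which computes return values at call time. The repeated wiring could be captured in a helper like the sketch below; MockVolumeResizer stands in for whatever mockgen generates in this repo, so treat the names as assumptions:

// expectProviderVolumeIDs captures the pattern shared by the tests above.
func expectProviderVolumeIDs(resizer *mocks.MockVolumeResizer, calls int) {
	resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
		DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
			// delegate to the per-volume ExtractVolumeID stubs
			return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
		}).
		Times(calls)
}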
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
 	result.EtcdHost = fromCRD.EtcdHost
 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.3-p1")
+	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances

@@ -62,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	// major version upgrade config
 	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual")
 	result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
-	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12")
+	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13")
-	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16")
+	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17")

 	// kubernetes config
 	result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False())

@@ -181,7 +181,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
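All the defaults above funnel through coalesce helpers: the value from the OperatorConfiguration CRD wins unless it is empty, in which case the compiled-in default applies. A sketch of the presumed util.Coalesce semantics (the helper itself is not part of this diff):

// coalesce returns val unless it is empty, otherwise defaultVal.
func coalesce(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}

// e.g. coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
// yields the built-in default only when the CRD leaves the field empty.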
@@ -122,6 +122,9 @@ type ControllerConfig struct {
 	IgnoredAnnotations []string

 	EnableJsonLogging bool
+
+	KubeQPS   int
+	KubeBurst int
 }

 // cached value for the GetOperatorNamespace
@@ -128,7 +128,7 @@ type Scalyr struct {
 // LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
 	LogicalBackupSchedule                string `name:"logical_backup_schedule" default:"30 00 * * *"`
-	LogicalBackupDockerImage             string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"`
+	LogicalBackupDockerImage             string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"`
 	LogicalBackupProvider                string `name:"logical_backup_provider" default:"s3"`
 	LogicalBackupAzureStorageAccountName string `name:"logical_backup_azure_storage_account_name" default:""`
 	LogicalBackupAzureStorageContainer   string `name:"logical_backup_azure_storage_container" default:""`

@@ -176,7 +176,7 @@ type Config struct {
 	WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
 	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-16:3.3-p1"`
+	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p2"`
 	SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
 	SidecarContainers       []v1.Container    `name:"sidecars"`
 	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`

@@ -247,8 +247,8 @@ type Config struct {
 	EnableTeamIdClusternamePrefix    bool          `name:"enable_team_id_clustername_prefix" default:"false"`
 	MajorVersionUpgradeMode          string        `name:"major_version_upgrade_mode" default:"manual"`
 	MajorVersionUpgradeTeamAllowList []string      `name:"major_version_upgrade_team_allow_list" default:""`
-	MinimalMajorVersion              string        `name:"minimal_major_version" default:"12"`
+	MinimalMajorVersion              string        `name:"minimal_major_version" default:"13"`
-	TargetMajorVersion               string        `name:"target_major_version" default:"16"`
+	TargetMajorVersion               string        `name:"target_major_version" default:"17"`
 	PatroniAPICheckInterval          time.Duration `name:"patroni_api_check_interval" default:"1s"`
 	PatroniAPICheckTimeout           time.Duration `name:"patroni_api_check_timeout" default:"5s"`
 	EnablePatroniFailsafeMode        *bool         `name:"enable_patroni_failsafe_mode" default:"false"`
@@ -7,6 +7,7 @@ const (
 	// EBS related constants
 	EBSVolumeIDStart = "/vol-"
 	EBSProvisioner   = "kubernetes.io/aws-ebs"
+	EBSDriver        = "ebs.csi.aws.com"
 	//https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VolumeModification.html
 	EBSVolumeStateModifying  = "modifying"
 	EBSVolumeStateOptimizing = "optimizing"
@@ -11,7 +11,10 @@ const (
 	EventStreamSourceAuthType    = "DatabaseAuthenticationSecret"
 	EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent"
 	EventStreamSinkNakadiType    = "Nakadi"
-	EventStreamRecoveryNoneType  = "None"
 	EventStreamRecoveryDLQType   = "DeadLetter"
+	EventStreamRecoveryIgnoreType  = "Ignore"
+	EventStreamRecoveryNoneType    = "None"
 	EventStreamRecoverySuffix    = "dead-letter-queue"
+	EventStreamCpuAnnotationKey    = "fes.zalando.org/FES_CPU"
+	EventStreamMemoryAnnotationKey = "fes.zalando.org/FES_MEMORY"
 )
@@ -36,7 +36,8 @@ func (r *EBSVolumeResizer) IsConnectedToProvider() bool {

 // VolumeBelongsToProvider checks if the given persistent volume is backed by EBS.
 func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool {
-	return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner
+	return (pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner) ||
+		(pv.Spec.CSI != nil && pv.Spec.CSI.Driver == constants.EBSDriver)
 }

 // ExtractVolumeID extracts volumeID from "aws://eu-central-1a/vol-075ddfc4a127d0bd4"

@@ -54,7 +55,12 @@ func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) {

 // GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes
 func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) {
-	volumeID := pv.Spec.AWSElasticBlockStore.VolumeID
+	var volumeID string = ""
+	if pv.Spec.CSI != nil {
+		volumeID = pv.Spec.CSI.VolumeHandle
+	} else if pv.Spec.AWSElasticBlockStore != nil {
+		volumeID = pv.Spec.AWSElasticBlockStore.VolumeID
+	}
 	if volumeID == "" {
 		return "", fmt.Errorf("got empty volume id for volume %v", pv)
 	}
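GetProviderVolumeID now prefers the CSI volume handle, which already is the bare EBS id, and only falls back to the legacy in-tree source, whose VolumeID still needs the ExtractVolumeID parsing. A runnable sketch of that parsing step, based on the documented "aws://zone/vol-..." example; the real code presumably anchors on constants.EBSVolumeIDStart ("/vol-") rather than this exact string handling:

package main

import (
	"fmt"
	"strings"
)

// extractVolumeID turns "aws://eu-central-1a/vol-075ddfc4a127d0bd4"
// into "vol-075ddfc4a127d0bd4".
func extractVolumeID(volumeID string) (string, error) {
	idx := strings.LastIndex(volumeID, "/vol-")
	if idx == -1 {
		return "", fmt.Errorf("malformed EBS volume id %q", volumeID)
	}
	return volumeID[idx+1:], nil
}

func main() {
	id, _ := extractVolumeID("aws://eu-central-1a/vol-075ddfc4a127d0bd4")
	fmt.Println(id) // vol-075ddfc4a127d0bd4
}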
@@ -0,0 +1,123 @@
+package volumes
+
+import (
+	"fmt"
+	"testing"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestGetProviderVolumeID(t *testing.T) {
+	tests := []struct {
+		name     string
+		pv       *v1.PersistentVolume
+		expected string
+		err      error
+	}{
+		{
+			name: "CSI volume handle",
+			pv: &v1.PersistentVolume{
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						CSI: &v1.CSIPersistentVolumeSource{
+							VolumeHandle: "vol-075ddfc4a127d0bd5",
+						},
+					},
+				},
+			},
+			expected: "vol-075ddfc4a127d0bd5",
+			err:      nil,
+		},
+		{
+			name: "AWS EBS volume handle",
+			pv: &v1.PersistentVolume{
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+							VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4",
+						},
+					},
+				},
+			},
+			expected: "vol-075ddfc4a127d0bd4",
+			err:      nil,
+		},
+		{
+			name: "Empty volume handle",
+			pv: &v1.PersistentVolume{
+				Spec: v1.PersistentVolumeSpec{},
+			},
+			expected: "",
+			err:      fmt.Errorf("got empty volume id for volume %v", &v1.PersistentVolume{}),
+		},
+	}
+
+	resizer := EBSVolumeResizer{}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			volumeID, err := resizer.GetProviderVolumeID(tt.pv)
+			if volumeID != tt.expected || (err != nil && err.Error() != tt.err.Error()) {
+				t.Errorf("expected %v, got %v, expected err %v, got %v", tt.expected, volumeID, tt.err, err)
+			}
+		})
+	}
+}
+
+func TestVolumeBelongsToProvider(t *testing.T) {
+	tests := []struct {
+		name     string
+		pv       *v1.PersistentVolume
+		expected bool
+	}{
+		{
+			name: "CSI volume handle",
+			pv: &v1.PersistentVolume{
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						CSI: &v1.CSIPersistentVolumeSource{
+							Driver:       "ebs.csi.aws.com",
+							VolumeHandle: "vol-075ddfc4a127d0bd5",
+						},
+					},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "AWS EBS volume handle",
+			pv: &v1.PersistentVolume{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						"pv.kubernetes.io/provisioned-by": "kubernetes.io/aws-ebs",
+					},
+				},
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+							VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4",
+						},
+					},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "Empty volume source",
+			pv: &v1.PersistentVolume{
+				Spec: v1.PersistentVolumeSpec{},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			resizer := EBSVolumeResizer{}
+			isProvider := resizer.VolumeBelongsToProvider(tt.pv)
+			if isProvider != tt.expected {
+				t.Errorf("expected %v, got %v", tt.expected, isProvider)
+			}
+		})
+	}
+}
@@ -1,6 +1,6 @@
 {
   "name": "postgres-operator-ui",
-  "version": "1.13.0",
+  "version": "1.14.0",
   "description": "PostgreSQL Operator UI",
   "main": "src/app.js",
   "config": {
@@ -18,7 +18,7 @@ spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: ghcr.io/zalando/postgres-operator-ui:v1.13.0
+          image: ghcr.io/zalando/postgres-operator-ui:v1.14.0
          ports:
            - containerPort: 8081
              protocol: "TCP"
@@ -73,11 +73,11 @@ spec:
       "limit_iops": 16000,
       "limit_throughput": 1000,
       "postgresql_versions": [
+        "17",
         "16",
         "15",
         "14",
-        "13",
+        "13"
-        "12"
       ]
     }
   # Exemple of settings to make snapshot view working in the ui when using AWS
@@ -267,7 +267,7 @@ DEFAULT_UI_CONFIG = {
     'users_visible': True,
     'databases_visible': True,
     'resources_visible': RESOURCES_VISIBLE,
-    'postgresql_versions': ['12', '13', '14', '15', '16'],
+    'postgresql_versions': ['13', '14', '15', '16', '17'],
     'dns_format_string': '{0}.{1}',
     'pgui_link': '',
     'static_network_whitelist': {},
@@ -305,7 +305,7 @@ def read_versions(
         if uid == 'wal' or defaulting(lambda: UUID(uid))
     ]

-BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/']
+BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/', '17/']

 def read_basebackups(
     pg_cluster,
@@ -31,11 +31,11 @@ default_operator_ui_config='{
   "limit_iops": 16000,
   "limit_throughput": 1000,
   "postgresql_versions": [
+    "17",
     "16",
     "15",
     "14",
-    "13",
+    "13"
-    "12"
   ],
   "static_network_whitelist": {
     "localhost": ["172.0.0.1/32"]