Compare commits

26 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 1af4c50ed0 | |
| | 3bc244fe39 | |
| | 8c2a290a12 | |
| | 3a85466cfd | |
| | eddf521227 | |
| | 8ba57b28f5 | |
| | dc29425969 | |
| | bcd729b2cc | |
| | d98fc2753a | |
| | cce2633192 | |
| | ad7e590916 | |
| | fa4bc21538 | |
| | 51135b07db | |
| | ccb52c094d | |
| | 68c4b49636 | |
| | c7a586d0f8 | |
| | 746df0d33d | |
| | 2a4be1cb39 | |
| | c8063eb78a | |
| | a56ecaace7 | |
| | f49b4f1e97 | |
| | b0cfeb30ea | |
| | e04b91d8af | |
| | 8522331cf2 | |
| | 46d5ebef6d | |
| | 4430aba3f3 | |
@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.23.4"
+          go-version: "^1.25.3"
       - name: Run unit tests
         run: make deps mocks test
@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v1
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.23.4"
+          go-version: "^1.25.3"
       - name: Make dependencies
       - name: Code generation
@@ -14,7 +14,7 @@ jobs:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
-          go-version: "^1.23.4"
+          go-version: "^1.25.3"
      - name: Make dependencies
      - name: Compile
@@ -1,2 +1,2 @@
 # global owners
-* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital
+* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet
LICENSE (2 changes)

@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2024 Zalando SE
+Copyright (c) 2025 Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
|
|
@ -4,4 +4,3 @@ Jan Mussler <jan.mussler@zalando.de>
|
|||
Jociele Padilha <jociele.padilha@zalando.de>
|
||||
Ida Novindasari <ida.novindasari@zalando.de>
|
||||
Polina Bungina <polina.bungina@zalando.de>
|
||||
Matthias Adler <matthias.adler@zalando.de>
|
||||
|
|
|
|||
Makefile (6 changes)

@@ -43,7 +43,7 @@ ifndef GOPATH
 endif

 PATH := $(GOPATH)/bin:$(PATH)
-SHELL := env PATH=$(PATH) $(SHELL)
+SHELL := env PATH="$(PATH)" $(SHELL)

 default: local

@@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
 	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .

 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.25.3 bash -c "make linux"

 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

@@ -78,7 +78,7 @@ mocks:
 	GO111MODULE=on go generate ./...

 tools:
-	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4
+	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.32.9
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy
README.md (10 changes)

@@ -17,6 +17,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
 * Live volume resize without pod restarts (AWS EBS, PVC)
 * Database connection pooling with PGBouncer
 * Support fast in place major version upgrade. Supports global upgrade of all clusters.
+* Pod protection during boostrap phase and configurable maintenance windows
 * Restore and cloning Postgres clusters on AWS, GCS and Azure
 * Additionally logical backups to S3 or GCS bucket can be configured
 * Standby cluster from S3 or GCS WAL archive

@@ -32,7 +33,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
   [pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) /
-  [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
+  [WAL-G](https://github.com/wal-g/wal-g) or [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
 * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
   [pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html),
   [pgextwlist](https://github.com/dimitri/pgextwlist),

@@ -41,12 +42,17 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
   [decoderbufs](https://github.com/debezium/postgres-decoderbufs),
   [hypopg](https://github.com/HypoPG/hypopg),
   [pg_cron](https://github.com/citusdata/pg_cron),
   [pg_repack](https://github.com/reorg/pg_repack),
   [pg_partman](https://github.com/pgpartman/pg_partman),
   [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache),
   [pg_audit](https://github.com/pgaudit/pgaudit),
   [pgfaceting](https://github.com/cybertec-postgresql/pgfaceting),
   [pgq](https://github.com/pgq/pgq),
   [pgvector](https://github.com/pgvector/pgvector),
   [plpgsql_check](https://github.com/okbob/plpgsql_check),
   [plproxy](https://github.com/plproxy/plproxy),
   [postgis](https://postgis.net/),
   [roaringbitmap](https://github.com/ChenHuajun/pg_roaringbitmap),
   [set_user](https://github.com/pgaudit/set_user) and
   [timescaledb](https://github.com/timescale/timescaledb)

@@ -57,12 +63,12 @@ production for over five years.

 | Release | Postgres versions | K8s versions | Golang |
 | :-------- | :---------------: | :---------------: | :-----: |
+| v1.15.0 | 13 → 17 | 1.27+ | 1.25.3 |
 | v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 |
 | v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 |
 | v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 |
 | v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 |
 | v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 |
-| v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 |

 ## Getting started
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.14.0
-appVersion: 1.14.0
+version: 1.15.0
+appVersion: 1.15.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,32 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v2
+    appVersion: 1.15.0
+    created: "2025-10-16T11:34:57.912432565+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: d82b5fb7c3d4fd8b106343b2f9472cba5e6050315ab3c520a79366f2b2f20c7a
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.15.0.tgz
+    version: 1.15.0
   - apiVersion: v2
     appVersion: 1.14.0
-    created: "2024-12-23T11:26:07.721761867+01:00"
+    created: "2025-10-16T11:34:57.906677165+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c

@@ -26,7 +49,7 @@ entries:
     version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2024-12-23T11:26:07.719409282+01:00"
+    created: "2025-10-16T11:34:57.904106882+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8

@@ -49,7 +72,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-12-23T11:26:07.717202918+01:00"
+    created: "2025-10-16T11:34:57.901526106+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd

@@ -72,7 +95,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-12-23T11:26:07.714792146+01:00"
+    created: "2025-10-16T11:34:57.898843691+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2

@@ -95,7 +118,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-12-23T11:26:07.712194397+01:00"
+    created: "2025-10-16T11:34:57.896283083+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce

@@ -116,27 +139,4 @@ entries:
     urls:
     - postgres-operator-ui-1.10.1.tgz
     version: 1.10.1
-  - apiVersion: v2
-    appVersion: 1.9.0
-    created: "2024-12-23T11:26:07.723891496+01:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.9.0.tgz
-    version: 1.9.0
-generated: "2024-12-23T11:26:07.709192608+01:00"
+generated: "2025-10-16T11:34:57.893034861+02:00"
Binary file not shown.
Binary file not shown.
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator-ui
-  tag: v1.14.0
+  tag: v1.15.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.

@@ -62,8 +62,6 @@ podAnnotations:
 extraEnvs:
   []
   # Exemple of settings to make snapshot view working in the ui when using AWS
   # - name: WALE_S3_ENDPOINT
   #   value: https+path://s3.us-east-1.amazonaws.com:443
   # - name: SPILO_S3_BACKUP_PREFIX
   #   value: spilo/
   # - name: AWS_ACCESS_KEY_ID

@@ -83,8 +81,6 @@ extraEnvs:
   #       key: AWS_DEFAULT_REGION
   # - name: SPILO_S3_BACKUP_BUCKET
   #   value: <s3 bucket used by the operator>
   # - name: "USE_AWS_INSTANCE_PROFILE"
   #   value: "true"

 # configure UI service
 service:
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
-version: 1.14.0
-appVersion: 1.14.0
+version: 1.15.0
+appVersion: 1.15.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -68,7 +68,7 @@ spec:
                 type: string
               docker_image:
                 type: string
-                default: "ghcr.io/zalando/spilo-17:4.0-p2"
+                default: "ghcr.io/zalando/spilo-17:4.0-p3"
               enable_crd_registration:
                 type: boolean
                 default: true
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v2
+    appVersion: 1.15.0
+    created: "2025-10-16T11:35:38.533627038+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 002dd47647bf51fbba023bd1762d807be478cf37de7a44b80cd01ac1f20bd94a
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.15.0.tgz
+    version: 1.15.0
   - apiVersion: v2
     appVersion: 1.14.0
-    created: "2024-12-23T11:25:32.596716566+01:00"
+    created: "2025-10-16T11:35:38.52489216+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8

@@ -25,7 +47,7 @@ entries:
     version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2024-12-23T11:25:32.591136261+01:00"
+    created: "2025-10-16T11:35:38.517347652+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef

@@ -47,7 +69,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-12-23T11:25:32.585419709+01:00"
+    created: "2025-10-16T11:35:38.510819005+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8

@@ -69,7 +91,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-12-23T11:25:32.580077286+01:00"
+    created: "2025-10-16T11:35:38.503781253+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9

@@ -91,7 +113,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-12-23T11:25:32.574641578+01:00"
+    created: "2025-10-16T11:35:38.494366224+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c

@@ -111,26 +133,4 @@ entries:
     urls:
     - postgres-operator-1.10.1.tgz
     version: 1.10.1
-  - apiVersion: v2
-    appVersion: 1.9.0
-    created: "2024-12-23T11:25:32.604748814+01:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.9.0.tgz
-    version: 1.9.0
-generated: "2024-12-23T11:25:32.568598763+01:00"
+generated: "2025-10-16T11:35:38.487472753+02:00"
Binary file not shown.
Binary file not shown.
@@ -1,7 +1,7 @@
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator
-  tag: v1.14.0
+  tag: v1.15.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.

@@ -38,7 +38,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: ghcr.io/zalando/spilo-17:4.0-p2
+  docker_image: ghcr.io/zalando/spilo-17:4.0-p3

   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""

@@ -364,7 +364,7 @@ configLogicalBackup:
   # logical_backup_memory_request: ""

   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
+  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""
@@ -1,4 +1,4 @@
-FROM golang:1.23-alpine
+FROM golang:1.25-alpine
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 # We need root certificates to deal with teams api over https

@@ -1,5 +1,5 @@
 ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
-FROM golang:1.23-alpine AS builder
+FROM golang:1.25-alpine AS builder
 ARG VERSION=latest

 COPY . /go/src/github.com/zalando/postgres-operator
@@ -13,7 +13,7 @@ apt-get install -y wget

 (
     cd /tmp
-    wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz
+    wget -q "https://storage.googleapis.com/golang/go1.25.3.linux-${arch}.tar.gz" -O go.tar.gz
     tar -xf go.tar.gz
     mv go /usr/local
     ln -s /usr/local/go/bin/go /usr/bin/go
@@ -195,12 +195,14 @@ from numerous escape characters in the latter log entry, view it in CLI with
 used internally in K8s.

 The StatefulSet is replaced if the following properties change:

 - annotations
 - volumeClaimTemplates
+- template volumes

 The StatefulSet is replaced and a rolling updates is triggered if the following
 properties differ between the old and new state:

 - container name, ports, image, resources, env, envFrom, securityContext and volumeMounts
 - template labels, annotations, service account, securityContext, affinity, priority class and termination grace period
@ -384,7 +386,7 @@ exceptions:
|
|||
The interval of days can be set with `password_rotation_interval` (default
|
||||
`90` = 90 days, minimum 1). On each rotation the user name and password values
|
||||
are replaced in the K8s secret. They belong to a newly created user named after
|
||||
the original role plus rotation date in YYMMDD format. All priviliges are
|
||||
the original role plus rotation date in YYMMDD format. All privileges are
|
||||
inherited meaning that migration scripts should still grant and revoke rights
|
||||
against the original role. The timestamp of the next rotation (in RFC 3339
|
||||
format, UTC timezone) is written to the secret as well. Note, if the rotation
|
||||
|
|
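For context, the rotation settings referenced in this hunk are plain operator configuration keys. A minimal ConfigMap sketch, using the key names from this doc and from the e2e test changes further down (`password_rotation_user_retention` and the `"30"` values come from the test data, not from recommended defaults):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  enable_password_rotation: "true"
  password_rotation_interval: "90"         # days between rotations (default 90, minimum 1)
  password_rotation_user_retention: "180"  # how long dated rotation users are kept
```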
@ -564,7 +566,7 @@ manifest affinity.
|
|||
```
|
||||
|
||||
If `node_readiness_label_merge` is set to `"OR"` (default) the readiness label
|
||||
affinty will be appended with its own expressions block:
|
||||
affinity will be appended with its own expressions block:
|
||||
|
||||
```yaml
|
||||
affinity:
|
||||
|
|
@ -620,22 +622,34 @@ By default the topology key for the pod anti affinity is set to
|
|||
`kubernetes.io/hostname`, you can set another topology key e.g.
|
||||
`failure-domain.beta.kubernetes.io/zone`. See [built-in node labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels) for available topology keys.
|
||||
|
||||
## Pod Disruption Budget
|
||||
## Pod Disruption Budgets
|
||||
|
||||
By default the operator uses a PodDisruptionBudget (PDB) to protect the cluster
|
||||
from voluntarily disruptions and hence unwanted DB downtime. The `MinAvailable`
|
||||
parameter of the PDB is set to `1` which prevents killing masters in single-node
|
||||
clusters and/or the last remaining running instance in a multi-node cluster.
|
||||
By default the operator creates two PodDisruptionBudgets (PDB) to protect the cluster
|
||||
from voluntarily disruptions and hence unwanted DB downtime: so-called primary PDB and
|
||||
and PDB for critical operations.
|
||||
|
||||
### Primary PDB
|
||||
The `MinAvailable` parameter of this PDB is set to `1` and, if `pdb_master_label_selector`
|
||||
is enabled, label selector includes `spilo-role=master` condition, which prevents killing
|
||||
masters in single-node clusters and/or the last remaining running instance in a multi-node
|
||||
cluster.
|
||||
|
||||
## PDB for critical operations
|
||||
The `MinAvailable` parameter of this PDB is equal to the `numberOfInstances` set in the
|
||||
cluster manifest, while label selector includes `critical-operation=true` condition. This
|
||||
allows to protect all pods of a cluster, given they are labeled accordingly.
|
||||
For example, Operator labels all Spilo pods with `critical-operation=true` during the major
|
||||
version upgrade run. You may want to protect cluster pods during other critical operations
|
||||
by assigning the label to pods yourself or using other means of automation.
|
||||
|
||||
The PDB is only relaxed in two scenarios:
|
||||
|
||||
* If a cluster is scaled down to `0` instances (e.g. for draining nodes)
|
||||
* If the PDB is disabled in the configuration (`enable_pod_disruption_budget`)
|
||||
|
||||
The PDB is still in place having `MinAvailable` set to `0`. If enabled it will
|
||||
be automatically set to `1` on scale up. Disabling PDBs helps avoiding blocking
|
||||
Kubernetes upgrades in managed K8s environments at the cost of prolonged DB
|
||||
downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
|
||||
The PDBs are still in place having `MinAvailable` set to `0`. Disabling PDBs
|
||||
helps avoiding blocking Kubernetes upgrades in managed K8s environments at the
|
||||
cost of prolonged DB downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
|
||||
for the use case.
|
||||
|
||||
## Add cluster-specific labels
|
||||
|
|
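To make the two budgets concrete, here is a hedged YAML sketch of what they could look like for a cluster named `acid-minimal-cluster`. The names follow the documented default `postgres-{cluster}-pdb` and the `postgres-{cluster}-critical-op-pdb` pattern seen in the e2e test changes below; the field values are illustrative, not actual operator output:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: postgres-acid-minimal-cluster-pdb          # primary PDB
spec:
  minAvailable: 1
  selector:
    matchLabels:
      cluster-name: acid-minimal-cluster
      spilo-role: master       # present when pdb_master_label_selector is enabled
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: postgres-acid-minimal-cluster-critical-op-pdb   # PDB for critical operations
spec:
  minAvailable: 2              # equals numberOfInstances from the cluster manifest
  selector:
    matchLabels:
      cluster-name: acid-minimal-cluster
      critical-operation: "true"
```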
@ -886,6 +900,7 @@ services:
|
|||
There are multiple options to specify service annotations that will be merged
|
||||
with each other and override in the following order (where latter take
|
||||
precedence):
|
||||
|
||||
1. Default annotations if LoadBalancer is enabled
|
||||
2. Globally configured `custom_service_annotations`
|
||||
3. `serviceAnnotations` specified in the cluster manifest
|
||||
|
|
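A small sketch of the highest-precedence option from that list, the per-cluster `serviceAnnotations` key (the annotation key/value shown is purely illustrative):

```yaml
spec:
  # (3) overrides globally configured custom_service_annotations (2),
  # which in turn override the LoadBalancer defaults (1)
  serviceAnnotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
```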
@ -1128,7 +1143,7 @@ metadata:
|
|||
iam.gke.io/gcp-service-account: <GCP_SERVICE_ACCOUNT_NAME>@<GCP_PROJECT_ID>.iam.gserviceaccount.com
|
||||
```
|
||||
|
||||
2. Specify the new custom service account in your [operator paramaters](./reference/operator_parameters.md)
|
||||
2. Specify the new custom service account in your [operator parameters](./reference/operator_parameters.md)
|
||||
|
||||
If using manual deployment or kustomize, this is done by setting
|
||||
`pod_service_account_name` in your configuration file specified in the
|
||||
|
|
|
|||
@@ -16,7 +16,7 @@ under the ~/go/src sub directories.

 Given the schema above, the Postgres Operator source code located at
 `github.com/zalando/postgres-operator` should be put at
--`~/go/src/github.com/zalando/postgres-operator`.
+`~/go/src/github.com/zalando/postgres-operator`.

 ```bash
 export GOPATH=~/go
@ -105,6 +105,7 @@ and K8s-like APIs for its custom resource definitions, namely the
|
|||
Postgres CRD and the operator CRD. The usage of the code generation follows
|
||||
conventions from the K8s community. Relevant scripts live in the `hack`
|
||||
directory:
|
||||
|
||||
* `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`,
|
||||
* `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI).
|
||||
|
||||
|
|
@ -112,6 +113,7 @@ The `/pkg/generated/` contains the resultant code. To make these scripts work,
|
|||
you may need to `export GOPATH=$(go env GOPATH)`
|
||||
|
||||
References for code generation are:
|
||||
|
||||
* [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369)
|
||||
See comments there for minor issues that can sometimes broke the generation process.
|
||||
* [Code generator source code](https://github.com/kubernetes/code-generator)
|
||||
|
|
@ -315,6 +317,7 @@ precedence.
|
|||
|
||||
Update the following Go files that obtain the configuration parameter from the
|
||||
manifest files:
|
||||
|
||||
* [operator_configuration_type.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go)
|
||||
* [operator_config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/controller/operator_config.go)
|
||||
* [config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go)
|
||||
|
|
@ -323,6 +326,7 @@ Postgres manifest parameters are defined in the [api package](https://github.com
|
|||
The operator behavior has to be implemented at least in [k8sres.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres.go).
|
||||
Validation of CRD parameters is controlled in [crds.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/crds.go).
|
||||
Please, reflect your changes in tests, for example in:
|
||||
|
||||
* [config_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config_test.go)
|
||||
* [k8sres_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres_test.go)
|
||||
* [util_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/util_test.go)
|
||||
|
|
@ -330,6 +334,7 @@ Please, reflect your changes in tests, for example in:
|
|||
### Updating manifest files
|
||||
|
||||
For the CRD-based configuration, please update the following files:
|
||||
|
||||
* the default [OperatorConfiguration](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
|
||||
* the CRD's [validation](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml)
|
||||
* the CRD's validation in the [Helm chart](https://github.com/zalando/postgres-operator/blob/master/charts/postgres-operator/crds/operatorconfigurations.yaml)
|
||||
|
|
@ -342,6 +347,7 @@ Last but no least, update the [ConfigMap](https://github.com/zalando/postgres-op
|
|||
|
||||
Finally, add a section for each new configuration option and/or cluster manifest
|
||||
parameter in the reference documents:
|
||||
|
||||
* [config reference](reference/operator_parameters.md)
|
||||
* [manifest reference](reference/cluster_manifest.md)
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ hence set it up first. For local tests we recommend to use one of the following
|
|||
solutions:
|
||||
|
||||
* [minikube](https://github.com/kubernetes/minikube/releases), which creates a
|
||||
single-node K8s cluster inside a VM (requires KVM or VirtualBox),
|
||||
K8s cluster inside a container or VM (requires Docker, KVM, Hyper-V, HyperKit, VirtualBox, or similar),
|
||||
* [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allows creating multi-nodes K8s
|
||||
clusters running on Docker (requires Docker)
|
||||
|
||||
|
|
@ -20,7 +20,7 @@ This quickstart assumes that you have started minikube or created a local kind
|
|||
cluster. Note that you can also use built-in K8s support in the Docker Desktop
|
||||
for Mac to follow the steps of this tutorial. You would have to replace
|
||||
`minikube start` and `minikube delete` with your launch actions for the Docker
|
||||
built-in K8s support.
|
||||
Desktop built-in K8s support.
|
||||
|
||||
## Configuration Options
|
||||
|
||||
|
|
@ -230,7 +230,7 @@ kubectl delete postgresql acid-minimal-cluster
|
|||
```
|
||||
|
||||
This should remove the associated StatefulSet, database Pods, Services and
|
||||
Endpoints. The PersistentVolumes are released and the PodDisruptionBudget is
|
||||
Endpoints. The PersistentVolumes are released and the PodDisruptionBudgets are
|
||||
deleted. Secrets however are not deleted and backups will remain in place.
|
||||
|
||||
When deleting a cluster while it is still starting up or got stuck during that
|
||||
|
|
|
|||
@@ -116,9 +116,9 @@ These parameters are grouped directly under the `spec` key in the manifest.

 * **maintenanceWindows**
   a list which defines specific time frames when certain maintenance operations
-  are allowed. So far, it is only implemented for automatic major version
-  upgrades. Accepted formats are "01:00-06:00" for daily maintenance windows or
-  "Sat:00:00-04:00" for specific days, with all times in UTC.
+  are allowed, such as automatic major upgrades or master pod migration. Accepted formats
+  are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific
+  days, with all times in UTC.

 * **users**
   a map of usernames to user flags for the users that should be created in the
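A minimal manifest sketch using both accepted window formats quoted in this hunk:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  maintenanceWindows:
  - "01:00-06:00"       # daily window, all times in UTC
  - "Sat:00:00-04:00"   # window on a specific day
```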
@ -247,7 +247,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
|
|||
[kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
|
||||
It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet.
|
||||
Also an `emptyDir` volume can be shared between initContainer and statefulSet.
|
||||
Additionaly, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
|
||||
Additionally, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
|
||||
Set `isSubPathExpr` to true if you want to include [API environment variables](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-expanded-environment).
|
||||
You can also specify in which container the additional Volumes will be mounted with the `targetContainers` array option.
|
||||
If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container.
|
||||
|
|
@ -257,7 +257,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
@@ -257,7 +257,7 @@ These parameters are grouped directly under the `spec` key in the manifest.

 ## Prepared Databases

 The operator can create databases with default owner, reader and writer roles
-without the need to specifiy them under `users` or `databases` sections. Those
+without the need to specify them under `users` or `databases` sections. Those
 parameters are grouped under the `preparedDatabases` top-level key. For more
 information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges).
@@ -107,8 +107,13 @@ Those are top-level keys, containing both leaf keys and groups.

 * **kubernetes_use_configmaps**
   Select if setup uses endpoints (default), or configmaps to manage leader when
   DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to
-  use endpoints option, and configmaps is required. By default,
-  `kubernetes_use_configmaps: false`, meaning endpoints will be used.
+  use endpoints option, and configmaps is required. Starting with K8s 1.33,
+  endpoints are marked as deprecated. It's recommended to switch to config maps
+  instead. But, to do so make sure you scale the Postgres cluster down to just
+  one primary pod (e.g. using `max_instances` option). Otherwise, you risk
+  running into a split-brain scenario.
+  By default, `kubernetes_use_configmaps: false`, meaning endpoints will be used.
+  Starting from v1.16.0 the default will be changed to `true`.

 * **docker_image**
   Spilo Docker image for Postgres instances. For production, don't rely on the
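As a concrete illustration, the ConfigMap-based form of this setting (a sketch; in the CRD-based OperatorConfiguration the same key should sit under the top-level `configuration` section):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # scale each cluster down to one primary pod before flipping this switch,
  # otherwise you risk the split-brain scenario described above
  kubernetes_use_configmaps: "true"
```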
@@ -209,7 +214,7 @@ under the `users` key.
   For all `LOGIN` roles that are not database owners the operator can rotate
   credentials in the corresponding K8s secrets by replacing the username and
   password. This means, new users will be added on each rotation inheriting
-  all priviliges from the original roles. The rotation date (in YYMMDD format)
+  all privileges from the original roles. The rotation date (in YYMMDD format)
   is appended to the names of the new user. The timestamp of the next rotation
   is written to the secret. The default is `false`.
@@ -334,13 +339,13 @@ configuration they are grouped under the `kubernetes` key.
   pod namespace).

 * **pdb_name_format**
-  defines the template for PDB (Pod Disruption Budget) names created by the
+  defines the template for primary PDB (Pod Disruption Budget) name created by the
   operator. The default is `postgres-{cluster}-pdb`, where `{cluster}` is
   replaced by the cluster name. Only the `{cluster}` placeholders is allowed in
   the template.

 * **pdb_master_label_selector**
-  By default the PDB will match the master role hence preventing nodes to be
+  By default the primary PDB will match the master role hence preventing nodes to be
   drained if the node_readiness_label is not used. If this option if set to
   `false` the `spilo-role=master` selector will not be added to the PDB.
@ -552,7 +557,7 @@ configuration they are grouped under the `kubernetes` key.
|
|||
pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`,
|
||||
`SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness
|
||||
probes it is recommended to switch the `pod_management_policy` to `parallel`
|
||||
to avoid unneccesary waiting times in case of multiple instances failing.
|
||||
to avoid unnecessary waiting times in case of multiple instances failing.
|
||||
The default is `false`.
|
||||
|
||||
* **storage_resize_mode**
|
||||
|
|
@ -701,7 +706,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
|
|||
replaced by the cluster name, `{namespace}` is replaced with the namespace
|
||||
and `{hostedzone}` is replaced with the hosted zone (the value of the
|
||||
`db_hosted_zone` parameter). The `{team}` placeholder can still be used,
|
||||
although it is not recommened because the team of a cluster can change.
|
||||
although it is not recommended because the team of a cluster can change.
|
||||
If the cluster name starts with the `teamId` it will also be part of the
|
||||
DNS, aynway. No other placeholders are allowed!
|
||||
|
||||
|
|
@ -720,7 +725,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
|
|||
is replaced by the cluster name, `{namespace}` is replaced with the
|
||||
namespace and `{hostedzone}` is replaced with the hosted zone (the value of
|
||||
the `db_hosted_zone` parameter). The `{team}` placeholder can still be used,
|
||||
although it is not recommened because the team of a cluster can change.
|
||||
although it is not recommended because the team of a cluster can change.
|
||||
If the cluster name starts with the `teamId` it will also be part of the
|
||||
DNS, aynway. No other placeholders are allowed!
|
||||
|
||||
|
|
|
|||
@@ -900,7 +900,7 @@ the PostgreSQL version between source and target cluster has to be the same.

 To start a cluster as standby, add the following `standby` section in the YAML
 file. You can stream changes from archived WAL files (AWS S3 or Google Cloud
-Storage) or from a remote primary. Only one option can be specfied in the
+Storage) or from a remote primary. Only one option can be specified in the
 manifest:

 ```yaml

@@ -911,7 +911,7 @@ spec:

 For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a
 [custom pod environment variable](administrator.md#custom-pod-environment-variables).
-It is not set from the config to allow for overridding.
+It is not set from the config to allow for overriding.

 ```yaml
 spec:

@@ -1282,7 +1282,7 @@ minutes if the certificates have changed and reloads postgres accordingly.
 ### TLS certificates for connection pooler

 By default, the pgBouncer image generates its own TLS certificate like Spilo.
-When the `tls` section is specfied in the manifest it will be used for the
+When the `tls` section is specified in the manifest it will be used for the
 connection pooler pod(s) as well. The security context options are hard coded
 to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same
 like for Spilo.
@@ -15,7 +15,7 @@ RUN apt-get update \
        curl \
        vim \
     && pip3 install --no-cache-dir -r requirements.txt \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
+    && curl -LO https://dl.k8s.io/release/v1.32.9/bin/linux/amd64/kubectl \
     && chmod +x ./kubectl \
     && mv ./kubectl /usr/local/bin/kubectl \
     && apt-get clean \
@@ -14,7 +14,7 @@ from kubernetes.client.rest import ApiException

 SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
 SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4"
-SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2"
+SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p3"

 def to_selector(labels):
     return ",".join(["=".join(lbl) for lbl in labels.items()])

@@ -1187,7 +1187,7 @@ class EndToEndTestCase(unittest.TestCase):
         Test major version upgrade: with full upgrade, maintenance window, and annotation
         """
         def check_version():
-            p = k8s.patroni_rest("acid-upgrade-test-0", "")
+            p = k8s.patroni_rest("acid-upgrade-test-0", "") or {}
             version = p.get("server_version", 0) // 10000
             return version

@@ -1237,7 +1237,7 @@ class EndToEndTestCase(unittest.TestCase):
         # should not upgrade because current time is not in maintenanceWindow
         current_time = datetime.now()
         maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}"
-        pg_patch_version_15 = {
+        pg_patch_version_15_outside_mw = {
             "spec": {
                 "postgresql": {
                     "version": "15"

@@ -1248,10 +1248,10 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_outside_mw)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

-        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
+        # no pod replacement outside of the maintenance window
         k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
         self.eventuallyEqual(check_version, 14, "Version should not be upgraded")

@@ -1259,12 +1259,12 @@ class EndToEndTestCase(unittest.TestCase):
         second_annotations = get_annotations()
         self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set")

-        # change the version again to trigger operator sync
+        # change maintenanceWindows to current
         maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}"
-        pg_patch_version_16 = {
+        pg_patch_version_15_in_mw = {
             "spec": {
                 "postgresql": {
-                    "version": "16"
+                    "version": "15"
                 },
                 "maintenanceWindows": [
                     maintenance_window_current

@@ -1273,13 +1273,13 @@ class EndToEndTestCase(unittest.TestCase):
         }

         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

-        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
+        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16")
+        self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15")

         # check if annotation for last upgrade's success is updated after second upgrade
         third_annotations = get_annotations()

@@ -1303,20 +1303,20 @@ class EndToEndTestCase(unittest.TestCase):
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

-        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
-        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
-        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set")
-
-        # change the version back to 15 and should remove failure annotation
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
-        k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
-        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
-        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+        self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set")
+
+        # change the version back to 15 and should remove failure annotation
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

         self.eventuallyEqual(check_version, 15, "Version should not be upgraded from 15")
         fourth_annotations = get_annotations()
         self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed")

@@ -1752,9 +1752,13 @@ class EndToEndTestCase(unittest.TestCase):
         Test password rotation and removal of users due to retention policy
         '''
         k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
         leader = k8s.get_cluster_leader_pod()
         today = date.today()

+        # remember number of secrets to make sure it stays the same
+        secret_count = k8s.count_secrets_with_label(cluster_label)
+
         # enable password rotation for owner of foo database
         pg_patch_rotation_single_users = {
             "spec": {

@@ -1810,6 +1814,7 @@ class EndToEndTestCase(unittest.TestCase):
         enable_password_rotation = {
             "data": {
                 "enable_password_rotation": "true",
+                "inherited_annotations": "environment",
                 "password_rotation_interval": "30",
                 "password_rotation_user_retention": "30",  # should be set to 60
             },

@@ -1856,13 +1861,29 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1,
             "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5)

+        # add annotation which triggers syncSecrets call
+        pg_annotation_patch = {
+            "metadata": {
+                "annotations": {
+                    "environment": "test",
+                }
+            }
+        }
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_annotation_patch)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        time.sleep(10)
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), secret_count, "Unexpected number of secrets")
+
         # check if rotation has been ignored for user from test_cross_namespace_secrets test
         db_user_secret = k8s.get_secret(username="test.db_user", namespace="test")
         secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8')

         self.assertEqual("test.db_user", secret_username,
             "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))

+        # check if annotation for secret has been updated
+        self.assertTrue("environment" in db_user_secret.metadata.annotations, "Added annotation was not propagated to secret")
+
         # disable password rotation for all other users (foo_user)
         # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {

@@ -2100,7 +2121,7 @@ class EndToEndTestCase(unittest.TestCase):
         patch_sset_propagate_annotations = {
             "data": {
                 "downscaler_annotations": "deployment-time,downscaler/*",
-                "inherited_annotations": "owned-by",
+                "inherited_annotations": "environment,owned-by",
             }
         }
         k8s.update_config(patch_sset_propagate_annotations)

@@ -2547,7 +2568,10 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed")

         pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace)
-        self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption owner reference check failed")
+        self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "primary pod disruption budget owner reference check failed")
+
+        pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-critical-op-pdb".format(cluster_name), cluster_namespace)
+        self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption budget for critical operations owner reference check failed")

         pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace)
         self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed")
go.mod (80 changes)

@@ -1,75 +1,75 @@
 module github.com/zalando/postgres-operator

-go 1.23.4
+go 1.25.3

 require (
-	github.com/aws/aws-sdk-go v1.53.8
+	github.com/Masterminds/semver v1.5.0
+	github.com/aws/aws-sdk-go v1.55.8
 	github.com/golang/mock v1.6.0
 	github.com/lib/pq v1.10.9
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
 	github.com/pkg/errors v0.9.1
 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.9.3
-	github.com/stretchr/testify v1.9.0
-	golang.org/x/crypto v0.31.0
-	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
+	github.com/stretchr/testify v1.11.1
+	golang.org/x/crypto v0.43.0
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.30.4
+	k8s.io/api v0.32.9
 	k8s.io/apiextensions-apiserver v0.25.9
-	k8s.io/apimachinery v0.30.4
-	k8s.io/client-go v0.30.4
+	k8s.io/apimachinery v0.32.9
+	k8s.io/client-go v0.32.9
 	k8s.io/code-generator v0.25.9
 )

 require (
-	github.com/Masterminds/semver v1.5.0
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
-	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/gnostic-models v0.6.9 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/imdario/mergo v0.3.6 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/net v0.25.0 // indirect
-	golang.org/x/oauth2 v0.10.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	golang.org/x/mod v0.28.0 // indirect
+	golang.org/x/net v0.45.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sync v0.17.0 // indirect
+	golang.org/x/sys v0.37.0 // indirect
+	golang.org/x/term v0.36.0 // indirect
+	golang.org/x/text v0.30.0 // indirect
+	golang.org/x/time v0.9.0 // indirect
+	golang.org/x/tools v0.37.0 // indirect
+	golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
-	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
-	k8s.io/klog/v2 v2.120.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
-	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )
177
go.sum
177
go.sum
|
|
```
@@ -2,55 +2,52 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k=
github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -73,8 +70,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -86,18 +83,19 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=
github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -111,39 +109,40 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -151,38 +150,41 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -192,31 +194,34 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI=
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs=
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c=
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI=
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk=
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
```
```diff
@@ -23,6 +23,7 @@ THE SOFTWARE.
 package cmd
 
 import (
+	"context"
 	"log"
 	"os"
 	user "os/user"
@@ -121,7 +122,7 @@ func connect(clusterName string, master bool, replica string, psql bool, user st
 		log.Fatal(err)
 	}
 
-	err = exec.Stream(remotecommand.StreamOptions{
+	err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
 		Stdin:  os.Stdin,
 		Stdout: os.Stdout,
 		Stderr: os.Stderr,
```
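The hunk above tracks client-go's deprecation of the context-less `Executor.Stream` in favour of `StreamWithContext`. Below is a minimal, self-contained sketch of the same call pattern; the function name, namespace, pod name, and command are illustrative placeholders, not taken from the kubectl-pg source.

```go
// Package pgexec sketches how an exec stream is wired up with client-go's
// remotecommand package, mirroring the StreamWithContext change above.
package pgexec

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// streamPsql opens an interactive exec session against a pod and wires it
// to the local terminal. podName and namespace are placeholder inputs.
func streamPsql(config *rest.Config, namespace, podName string) error {
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}

	// Build the exec subresource request for the target pod.
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").
		Namespace(namespace).
		Name(podName).
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command: []string{"psql"},
			Stdin:   true,
			Stdout:  true,
			Stderr:  true,
			TTY:     true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return err
	}

	// StreamWithContext supersedes the deprecated Stream; passing a
	// cancellable context instead of context.TODO() would let the caller
	// abort a hung session, which is the point of the new signature.
	return exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
		Tty:    true,
	})
}
```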
```diff
@@ -65,7 +65,7 @@ func version(namespace string) {
 
 	operatorDeployment := getPostgresOperator(client)
 	if operatorDeployment.Name == "" {
-		log.Fatal("make sure zalando's postgres operator is running")
+		log.Fatalf("make sure zalando's postgres operator is running in namespace %s", namespace)
 	}
 	operatorImage := operatorDeployment.Spec.Template.Spec.Containers[0].Image
 	imageDetails := strings.Split(operatorImage, ":")
```
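The version command above derives the operator version by splitting the deployment's container image reference on ":". A toy illustration of that step, with a made-up image reference:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical image reference; the real value comes from the operator
	// deployment's pod template, as in the hunk above.
	operatorImage := "registry.example.com/acid/postgres-operator:v1.14.0"
	imageDetails := strings.Split(operatorImage, ":")
	// With a tagged reference the last element is the tag; an untagged
	// reference would leave only the repository name.
	fmt.Println(imageDetails[len(imageDetails)-1]) // v1.14.0
}
```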
kubectl-pg/go.mod

```
@@ -1,74 +1,71 @@
module github.com/zalando/postgres-operator/kubectl-pg

go 1.23.4
go 1.25

require (
	github.com/spf13/cobra v1.8.1
	github.com/spf13/viper v1.19.0
	github.com/zalando/postgres-operator v1.13.0
	k8s.io/api v0.30.4
	github.com/spf13/cobra v1.10.1
	github.com/spf13/viper v1.21.0
	github.com/zalando/postgres-operator v1.14.0
	k8s.io/api v0.32.9
	k8s.io/apiextensions-apiserver v0.25.9
	k8s.io/apimachinery v0.30.4
	k8s.io/client-go v0.30.4
	k8s.io/apimachinery v0.32.9
	k8s.io/client-go v0.32.9
)

require (
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/go-logr/logr v1.4.1 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-openapi/jsonpointer v0.21.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.3 // indirect
	github.com/go-openapi/swag v0.23.0 // indirect
	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.4.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/spdystream v0.2.0 // indirect
	github.com/moby/spdystream v0.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
	github.com/sagikazarmark/locafero v0.4.0 // indirect
	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/sagikazarmark/locafero v0.11.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spf13/afero v1.11.0 // indirect
	github.com/spf13/cast v1.6.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
	github.com/spf13/afero v1.15.0 // indirect
	github.com/spf13/cast v1.10.0 // indirect
	github.com/spf13/pflag v1.0.10 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
	golang.org/x/net v0.25.0 // indirect
	golang.org/x/oauth2 v0.18.0 // indirect
	golang.org/x/sys v0.28.0 // indirect
	golang.org/x/net v0.30.0 // indirect
	golang.org/x/oauth2 v0.23.0 // indirect
	golang.org/x/sys v0.29.0 // indirect
	golang.org/x/term v0.27.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	golang.org/x/time v0.5.0 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	golang.org/x/text v0.28.0 // indirect
	golang.org/x/time v0.7.0 // indirect
	google.golang.org/protobuf v1.35.1 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/klog/v2 v2.120.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)
```
kubectl-pg/go.sum

```
@@ -1,6 +1,6 @@
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -10,44 +10,42 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -63,14 +61,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -82,151 +76,128 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zalando/postgres-operator v1.13.0 h1:T9Mb+ZRQyTxXbagIK66GLVGCwM3661aX2lOkNpax4s8=
github.com/zalando/postgres-operator v1.13.0/go.mod h1:WiMEKzUny2lJHYle+7+D/5BhlvPn8prl76rEDYLsQAg=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
github.com/zalando/postgres-operator v1.14.0 h1:C8+n26C8v6fPB1SNW+Y8X6oQoEHufzGJXJzYPlix+zw=
github.com/zalando/postgres-operator v1.14.0/go.mod h1:ZTHY3sVfHgLLRpTgyR/44JcumbACeJBjztr3o1yHBdc=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI=
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs=
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c=
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI=
```
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
|||
|
|
@ -122,7 +122,21 @@ function aws_upload {

function gcs_upload {
    PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz

    gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP"
    # Set local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS to nothing or
    # to the value of the LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS env var.
    # Needed because `set -o nounset` is globally set
    local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS=${LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS:-}

    GSUTIL_OPTIONS=("-o" "Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS")

    # If GOOGLE_APPLICATION_CREDENTIALS is not set try to get
    # creds from metadata
    if [[ -z $LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS ]]
    then
        GSUTIL_OPTIONS[1]="GoogleCompute:service_account=default"
    fi

    gsutil ${GSUTIL_OPTIONS[@]} cp - "$PATH_TO_BACKUP"
}

function upload {

@ -10,7 +10,7 @@ metadata:
  # "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date" key is configured
  # "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured
spec:
  dockerImage: ghcr.io/zalando/spilo-17:4.0-p2
  dockerImage: ghcr.io/zalando/spilo-17:4.0-p3
  teamId: "acid"
  numberOfInstances: 2
  users:  # Application/Robot users

@ -34,7 +34,7 @@ data:
  default_memory_request: 100Mi
  # delete_annotation_date_key: delete-date
  # delete_annotation_name_key: delete-clustername
  docker_image: ghcr.io/zalando/spilo-17:4.0-p2
  docker_image: ghcr.io/zalando/spilo-17:4.0-p3
  # downscaler_annotations: "deployment-time,downscaler/*"
  enable_admin_role_for_users: "true"
  enable_crd_registration: "true"

@ -86,7 +86,7 @@ data:
  # logical_backup_cpu_limit: ""
  # logical_backup_cpu_request: ""
  logical_backup_cronjob_environment_secret: ""
  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"
  # logical_backup_google_application_credentials: ""
  logical_backup_job_prefix: "logical-backup-"
  # logical_backup_memory_limit: ""

@ -59,13 +59,20 @@ rules:
  - get
  - patch
  - update
# to read configuration from ConfigMaps
# to read configuration from ConfigMaps and help Patroni manage the cluster if endpoints are not used
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# to send events to the CRs
- apiGroups:
  - ""

@ -78,7 +85,7 @@ rules:
  - patch
  - update
  - watch
# to manage endpoints which are also used by Patroni
# to manage endpoints which are also used by Patroni (if it is using config maps)
- apiGroups:
  - ""
  resources:

@ -249,7 +256,21 @@ kind: ClusterRole
metadata:
  name: postgres-pod
rules:
# Patroni needs to watch and manage endpoints
# Patroni needs to watch and manage config maps (or endpoints)
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# Patroni needs to watch and manage endpoints (or config maps)
- apiGroups:
  - ""
  resources:

@ -66,7 +66,7 @@ spec:
  type: string
docker_image:
  type: string
  default: "ghcr.io/zalando/spilo-17:4.0-p2"
  default: "ghcr.io/zalando/spilo-17:4.0-p3"
enable_crd_registration:
  type: boolean
  default: true

@ -508,7 +508,7 @@ spec:
  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
logical_backup_docker_image:
  type: string
  default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
  default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"
logical_backup_google_application_credentials:
  type: string
logical_backup_job_prefix:

@ -19,7 +19,7 @@ spec:
      serviceAccountName: postgres-operator
      containers:
      - name: postgres-operator
        image: ghcr.io/zalando/postgres-operator:v1.14.0
        image: ghcr.io/zalando/postgres-operator:v1.15.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:

@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  docker_image: ghcr.io/zalando/spilo-17:4.0-p2
  docker_image: ghcr.io/zalando/spilo-17:4.0-p3
  # enable_crd_registration: true
  # crd_categories:
  #   - all

@ -168,7 +168,7 @@ configuration:
  # logical_backup_cpu_request: ""
  # logical_backup_memory_limit: ""
  # logical_backup_memory_request: ""
  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"
  # logical_backup_google_application_credentials: ""
  logical_backup_job_prefix: "logical-backup-"
  logical_backup_provider: "s3"

@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 Compose, Zalando SE
Copyright 2025 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@ -59,16 +59,17 @@ type Config struct {
}

type kubeResources struct {
    Services            map[PostgresRole]*v1.Service
    Endpoints           map[PostgresRole]*v1.Endpoints
    PatroniEndpoints    map[string]*v1.Endpoints
    PatroniConfigMaps   map[string]*v1.ConfigMap
    Secrets             map[types.UID]*v1.Secret
    Statefulset         *appsv1.StatefulSet
    VolumeClaims        map[types.UID]*v1.PersistentVolumeClaim
    PodDisruptionBudget *policyv1.PodDisruptionBudget
    LogicalBackupJob    *batchv1.CronJob
    Streams             map[string]*zalandov1.FabricEventStream
    Services                      map[PostgresRole]*v1.Service
    Endpoints                     map[PostgresRole]*v1.Endpoints
    PatroniEndpoints              map[string]*v1.Endpoints
    PatroniConfigMaps             map[string]*v1.ConfigMap
    Secrets                       map[types.UID]*v1.Secret
    Statefulset                   *appsv1.StatefulSet
    VolumeClaims                  map[types.UID]*v1.PersistentVolumeClaim
    PrimaryPodDisruptionBudget    *policyv1.PodDisruptionBudget
    CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget
    LogicalBackupJob              *batchv1.CronJob
    Streams                       map[string]*zalandov1.FabricEventStream
    //Pods are treated separately
}

@ -105,10 +106,17 @@ type Cluster struct {
}

type compareStatefulsetResult struct {
    match         bool
    replace       bool
    rollingUpdate bool
    reasons       []string
    match                 bool
    replace               bool
    rollingUpdate         bool
    reasons               []string
    deletedPodAnnotations []string
}

type compareLogicalBackupJobResult struct {
    match                 bool
    reasons               []string
    deletedPodAnnotations []string
}

// New creates a new cluster. This function should be called from a controller.

|
|
@ -336,14 +344,10 @@ func (c *Cluster) Create() (err error) {
|
|||
c.logger.Infof("secrets have been successfully created")
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created")
|
||||
|
||||
if c.PodDisruptionBudget != nil {
|
||||
return fmt.Errorf("pod disruption budget already exists in the cluster")
|
||||
if err = c.createPodDisruptionBudgets(); err != nil {
|
||||
return fmt.Errorf("could not create pod disruption budgets: %v", err)
|
||||
}
|
||||
pdb, err := c.createPodDisruptionBudget()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create pod disruption budget: %v", err)
|
||||
}
|
||||
c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta))
|
||||
c.logger.Info("pod disruption budgets have been successfully created")
|
||||
|
||||
if c.Statefulset != nil {
|
||||
return fmt.Errorf("statefulset already exists in the cluster")
|
||||
|
|
@ -431,6 +435,7 @@ func (c *Cluster) Create() (err error) {
|
|||
}
|
||||
|
||||
func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
|
||||
deletedPodAnnotations := []string{}
|
||||
reasons := make([]string, 0)
|
||||
var match, needsRollUpdate, needsReplace bool
|
||||
|
||||
|
|
@ -445,7 +450,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
        needsReplace = true
        reasons = append(reasons, "new statefulset's ownerReferences do not match")
    }
    if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed {
    if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations, nil); changed {
        match = false
        needsReplace = true
        reasons = append(reasons, "new statefulset's annotations do not match: "+reason)

@ -519,7 +524,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
        }
    }

    if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed {
    if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations, &deletedPodAnnotations); changed {
        match = false
        needsReplace = true
        reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason)

@ -541,7 +546,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
            reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
            continue
        }
        if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations); changed {
        if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations, nil); changed {
            needsReplace = true
            reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason))
        }

@ -579,7 +584,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
        match = false
    }

    return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace}
    return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace, deletedPodAnnotations: deletedPodAnnotations}
}

type containerCondition func(a, b v1.Container) bool

@ -781,7 +786,7 @@ func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool {
    return false
}

func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) {
func (c *Cluster) compareAnnotations(old, new map[string]string, removedList *[]string) (bool, string) {
    reason := ""
    ignoredAnnotations := make(map[string]bool)
    for _, ignore := range c.OpConfig.IgnoredAnnotations {

@ -794,6 +799,9 @@ func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string)
        }
        if _, ok := new[key]; !ok {
            reason += fmt.Sprintf(" Removed %q.", key)
            if removedList != nil {
                *removedList = append(*removedList, key)
            }
        }
    }

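Reviewer note: the new removedList parameter is optional — callers that only need a yes/no answer pass nil, while callers that must clean up deleted keys pass a pointer and get the removed key names back. A minimal self-contained sketch of that pattern (illustrative only; the operator's real helper additionally honors IgnoredAnnotations):

    package main

    import "fmt"

    // diffAnnotations reports whether two annotation maps differ and, when
    // removed is non-nil, records keys that exist in old but not in updated.
    func diffAnnotations(old, updated map[string]string, removed *[]string) bool {
        changed := false
        for key, oldVal := range old {
            newVal, ok := updated[key]
            if !ok {
                changed = true
                if removed != nil {
                    *removed = append(*removed, key)
                }
            } else if newVal != oldVal {
                changed = true
            }
        }
        for key := range updated {
            if _, ok := old[key]; !ok {
                changed = true
            }
        }
        return changed
    }

    func main() {
        removed := []string{}
        old := map[string]string{"owner": "team-a", "zone": "eu-1"}
        updated := map[string]string{"owner": "team-a"}
        fmt.Println(diffAnnotations(old, updated, &removed), removed) // true [zone]
    }
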
|
@ -833,44 +841,57 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
|
|||
return false, "new service's owner references do not match the current ones"
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(old.Spec.Selector, new.Spec.Selector) {
|
||||
return false, "new service's selector does not match the current one"
|
||||
}
|
||||
|
||||
if old.Spec.ExternalTrafficPolicy != new.Spec.ExternalTrafficPolicy {
|
||||
return false, "new service's ExternalTrafficPolicy does not match the current one"
|
||||
}
|
||||
|
||||
return true, ""
|
||||
}
|
||||
|
||||
func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool, reason string) {
|
||||
func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) *compareLogicalBackupJobResult {
|
||||
deletedPodAnnotations := []string{}
|
||||
reasons := make([]string, 0)
|
||||
match := true
|
||||
|
||||
if cur.Spec.Schedule != new.Spec.Schedule {
|
||||
return false, fmt.Sprintf("new job's schedule %q does not match the current one %q",
|
||||
new.Spec.Schedule, cur.Spec.Schedule)
|
||||
match = false
|
||||
reasons = append(reasons, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule))
|
||||
}
|
||||
|
||||
newImage := new.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image
|
||||
curImage := cur.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image
|
||||
if newImage != curImage {
|
||||
return false, fmt.Sprintf("new job's image %q does not match the current one %q",
|
||||
newImage, curImage)
|
||||
match = false
|
||||
reasons = append(reasons, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage))
|
||||
}
|
||||
|
||||
newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations
|
||||
curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations
|
||||
if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation); changed {
|
||||
return false, fmt.Sprintf("new job's pod template metadata annotations does not match " + reason)
|
||||
if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation, &deletedPodAnnotations); changed {
|
||||
match = false
|
||||
reasons = append(reasons, fmt.Sprint("new job's pod template metadata annotations do not match "+reason))
|
||||
}
|
||||
|
||||
newPgVersion := getPgVersion(new)
|
||||
curPgVersion := getPgVersion(cur)
|
||||
if newPgVersion != curPgVersion {
|
||||
return false, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q",
|
||||
newPgVersion, curPgVersion)
|
||||
match = false
|
||||
reasons = append(reasons, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", newPgVersion, curPgVersion))
|
||||
}
|
||||
|
||||
needsReplace := false
|
||||
reasons := make([]string, 0)
|
||||
needsReplace, reasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, reasons)
|
||||
contReasons := make([]string, 0)
|
||||
needsReplace, contReasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, contReasons)
|
||||
if needsReplace {
|
||||
return false, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(reasons, `', '`))
|
||||
match = false
|
||||
reasons = append(reasons, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(contReasons, `', '`)))
|
||||
}
|
||||
|
||||
return true, ""
|
||||
return &compareLogicalBackupJobResult{match: match, reasons: reasons, deletedPodAnnotations: deletedPodAnnotations}
|
||||
}
|
||||
|
||||
func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) {
|
||||
|
|
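Reviewer note: returning a result struct instead of (bool, string) lets the comparison keep going past the first mismatch, so one sync pass can log every difference at once. A reduced sketch of that accumulate-reasons shape (names are illustrative, not the operator's types):

    package main

    import "fmt"

    type compareResult struct {
        match   bool
        reasons []string
    }

    // compareJobs collects all mismatches instead of returning on the first one.
    func compareJobs(curSchedule, newSchedule, curImage, newImage string) *compareResult {
        res := &compareResult{match: true}
        if curSchedule != newSchedule {
            res.match = false
            res.reasons = append(res.reasons, fmt.Sprintf("schedule %q != %q", newSchedule, curSchedule))
        }
        if curImage != newImage {
            res.match = false
            res.reasons = append(res.reasons, fmt.Sprintf("image %q != %q", newImage, curImage))
        }
        return res
    }

    func main() {
        res := compareJobs("30 0 * * *", "0 1 * * *", "backup:v1.14.0", "backup:v1.15.0")
        fmt.Println(res.match, res.reasons) // false, with both reasons reported
    }
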
@ -881,7 +902,7 @@ func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) {
    if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) {
        return false, "new PDB's owner references do not match the current ones"
    }
    if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations); changed {
    if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations, nil); changed {
        return false, "new PDB's annotations do not match the current ones:" + reason
    }
    return true, ""

@ -957,6 +978,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
    defer c.mu.Unlock()

    c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)

    if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
        // do not apply any major version related changes yet
        newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
    }
    c.setSpec(newSpec)

    defer func() {

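Reviewer note: the guard above keeps the old PgVersion in the desired spec whenever the current time falls outside every configured maintenance window, deferring the major upgrade until a window opens. isInMaintenanceWindow itself is not part of this diff; the sketch below is only a plausible reading of its semantics — the stand-in type, field names, and the empty-list-means-allowed default are assumptions, not the operator's actual helper:

    package main

    import (
        "fmt"
        "time"
    )

    // window is a stand-in for acidv1.MaintenanceWindow; field names are assumed.
    type window struct {
        everyday bool
        weekday  time.Weekday
        start    time.Duration // offset from midnight UTC
        end      time.Duration
    }

    func inMaintenanceWindow(windows []window, now time.Time) bool {
        if len(windows) == 0 {
            return true // assumed: no windows configured means "always allowed"
        }
        // Truncate aligns to midnight UTC because now is a UTC time.
        midnight := now.Truncate(24 * time.Hour)
        since := now.Sub(midnight)
        for _, w := range windows {
            if (w.everyday || w.weekday == now.Weekday()) && since >= w.start && since <= w.end {
                return true
            }
        }
        return false
    }

    func main() {
        w := []window{{everyday: true, start: 1 * time.Hour, end: 5 * time.Hour}}
        fmt.Println(inMaintenanceWindow(w, time.Date(2025, 1, 1, 3, 0, 0, 0, time.UTC))) // true
    }
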
@ -1016,10 +1042,18 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
    // only when streams were not specified in oldSpec but in newSpec
    needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0

    annotationsChanged, _ := c.compareAnnotations(oldSpec.Annotations, newSpec.Annotations)

    initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser
    if initUsers {

    // if inherited annotations differ secrets have to be synced on update
    newAnnotations := c.annotationsSet(nil)
    oldAnnotations := make(map[string]string)
    for _, secret := range c.Secrets {
        oldAnnotations = secret.ObjectMeta.Annotations
        break
    }
    annotationsChanged, _ := c.compareAnnotations(oldAnnotations, newAnnotations, nil)

    if initUsers || annotationsChanged {
        c.logger.Debug("initialize users")
        if err := c.initUsers(); err != nil {
            c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)

@ -1027,8 +1061,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
            updateFailed = true
            return
        }
    }
    if initUsers || annotationsChanged {

        c.logger.Debug("syncing secrets")
        //TODO: mind the secrets of the deleted/new users
        if err := c.syncSecrets(); err != nil {

@ -1060,9 +1093,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
        }
    }

    // pod disruption budget
    if err := c.syncPodDisruptionBudget(true); err != nil {
        c.logger.Errorf("could not sync pod disruption budget: %v", err)
    // pod disruption budgets
    if err := c.syncPodDisruptionBudgets(true); err != nil {
        c.logger.Errorf("could not sync pod disruption budgets: %v", err)
        updateFailed = true
    }

@ -1135,6 +1168,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

    // streams
    if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) {
        c.logger.Debug("syncing streams")
        if err := c.syncStreams(); err != nil {
            c.logger.Errorf("could not sync streams: %v", err)
            updateFailed = true

@ -1207,10 +1241,10 @@ func (c *Cluster) Delete() error {
        c.logger.Info("not deleting secrets because disabled in configuration")
    }

    if err := c.deletePodDisruptionBudget(); err != nil {
    if err := c.deletePodDisruptionBudgets(); err != nil {
        anyErrors = true
        c.logger.Warningf("could not delete pod disruption budget: %v", err)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err)
        c.logger.Warningf("could not delete pod disruption budgets: %v", err)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budgets: %v", err)
    }

    for _, role := range []PostgresRole{Master, Replica} {

@ -1709,16 +1743,17 @@ func (c *Cluster) GetCurrentProcess() Process {
// GetStatus provides status of the cluster
func (c *Cluster) GetStatus() *ClusterStatus {
    status := &ClusterStatus{
        Cluster:             c.Name,
        Namespace:           c.Namespace,
        Team:                c.Spec.TeamID,
        Status:              c.Status,
        Spec:                c.Spec,
        MasterService:       c.GetServiceMaster(),
        ReplicaService:      c.GetServiceReplica(),
        StatefulSet:         c.GetStatefulSet(),
        PodDisruptionBudget: c.GetPodDisruptionBudget(),
        CurrentProcess:      c.GetCurrentProcess(),
        Cluster:                       c.Name,
        Namespace:                     c.Namespace,
        Team:                          c.Spec.TeamID,
        Status:                        c.Status,
        Spec:                          c.Spec,
        MasterService:                 c.GetServiceMaster(),
        ReplicaService:                c.GetServiceReplica(),
        StatefulSet:                   c.GetStatefulSet(),
        PrimaryPodDisruptionBudget:    c.GetPrimaryPodDisruptionBudget(),
        CriticalOpPodDisruptionBudget: c.GetCriticalOpPodDisruptionBudget(),
        CurrentProcess:                c.GetCurrentProcess(),

        Error: fmt.Errorf("error: %s", c.Error),
    }

@ -1731,18 +1766,58 @@ func (c *Cluster) GetStatus() *ClusterStatus {
    return status
}

// Switchover does a switchover (via Patroni) to a candidate pod
func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
func (c *Cluster) GetSwitchoverSchedule() string {
    var possibleSwitchover, schedule time.Time

    now := time.Now().UTC()
    for _, window := range c.Spec.MaintenanceWindows {
        // in the best case it is possible today
        possibleSwitchover = time.Date(now.Year(), now.Month(), now.Day(), window.StartTime.Hour(), window.StartTime.Minute(), 0, 0, time.UTC)
        if window.Everyday {
            if now.After(possibleSwitchover) {
                // we are already past the time for today, try tomorrow
                possibleSwitchover = possibleSwitchover.AddDate(0, 0, 1)
            }
        } else {
            if now.Weekday() != window.Weekday {
                // get closest possible time for this window
                possibleSwitchover = possibleSwitchover.AddDate(0, 0, int((7+window.Weekday-now.Weekday())%7))
            } else if now.After(possibleSwitchover) {
                // we are already past the time for today, try next week
                possibleSwitchover = possibleSwitchover.AddDate(0, 0, 7)
            }
        }

        if (schedule.Equal(time.Time{})) || possibleSwitchover.Before(schedule) {
            schedule = possibleSwitchover
        }
    }
    return schedule.Format("2006-01-02T15:04+00")
}

// Switchover does a switchover (via Patroni) to a candidate pod
func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName, scheduled bool) error {
    var err error
    c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
    c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)

    stopCh := make(chan struct{})
    ch := c.registerPodSubscriber(candidate)
    defer c.unregisterPodSubscriber(candidate)
    defer close(stopCh)

    if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
    var scheduled_at string
    if scheduled {
        scheduled_at = c.GetSwitchoverSchedule()
    } else {
        c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)
        scheduled_at = ""
    }

    if err = c.patroni.Switchover(curMaster, candidate.Name, scheduled_at); err == nil {
        if scheduled {
            c.logger.Infof("switchover from %q to %q is scheduled at %s", curMaster.Name, candidate, scheduled_at)
            return nil
        }
        c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate)
        _, err = c.waitForPodLabel(ch, stopCh, nil)

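Reviewer note on the layout string: Go layouts are written as how the reference time Mon Jan 2 15:04:05 MST 2006 would appear, and "+00" is not one of the recognized placeholders, so it is emitted literally — which is correct here because GetSwitchoverSchedule always computes the schedule in UTC. That literal string is what ends up in the scheduled_at argument handed to Patroni below. A quick runnable check:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // "2006-01-02T15:04" are reference-time placeholders; "+00" is literal text,
        // valid only because the value being formatted is always UTC.
        at := time.Date(2025, time.July, 1, 9, 30, 0, 0, time.UTC)
        fmt.Println(at.Format("2006-01-02T15:04+00")) // 2025-07-01T09:30+00
    }
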
@ -1750,6 +1825,9 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
            err = fmt.Errorf("could not get master pod label: %v", err)
        }
    } else {
        if scheduled {
            return fmt.Errorf("could not schedule switchover: %v", err)
        }
        err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
    }

@ -1341,14 +1341,21 @@ func TestCompareEnv(t *testing.T) {
    }
}

func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
func newService(
    annotations map[string]string,
    svcType v1.ServiceType,
    sourceRanges []string,
    selector map[string]string,
    policy v1.ServiceExternalTrafficPolicyType) *v1.Service {
    svc := &v1.Service{
        Spec: v1.ServiceSpec{
            Type:                     svcT,
            LoadBalancerSourceRanges: lbSr,
            Selector:                 selector,
            Type:                     svcType,
            LoadBalancerSourceRanges: sourceRanges,
            ExternalTrafficPolicy:    policy,
        },
    }
    svc.Annotations = ann
    svc.Annotations = annotations
    return svc
}

@ -1365,13 +1372,18 @@ func TestCompareServices(t *testing.T) {
        },
    }

    defaultPolicy := v1.ServiceExternalTrafficPolicyTypeCluster

    serviceWithOwnerReference := newService(
        map[string]string{
            constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
            constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
        },
        v1.ServiceTypeClusterIP,
        []string{"128.141.0.0/16", "137.138.0.0/16"})
        []string{"128.141.0.0/16", "137.138.0.0/16"},
        nil,
        defaultPolicy,
    )

    ownerRef := metav1.OwnerReference{
        APIVersion: "acid.zalan.do/v1",

@ -1397,14 +1409,16 @@ func TestCompareServices(t *testing.T) {
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeClusterIP,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        new: newService(
            map[string]string{
                constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeClusterIP,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        match: true,
    },
    {

@ -1415,14 +1429,16 @@ func TestCompareServices(t *testing.T) {
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeClusterIP,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        new: newService(
            map[string]string{
                constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeLoadBalancer,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        match:  false,
        reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
    },

@ -1434,14 +1450,16 @@ func TestCompareServices(t *testing.T) {
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeLoadBalancer,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        new: newService(
            map[string]string{
                constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeLoadBalancer,
            []string{"185.249.56.0/22"}),
            []string{"185.249.56.0/22"},
            nil, defaultPolicy),
        match:  false,
        reason: `new service's LoadBalancerSourceRange does not match the current one`,
    },

@ -1453,14 +1471,16 @@ func TestCompareServices(t *testing.T) {
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeLoadBalancer,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        new: newService(
            map[string]string{
                constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeLoadBalancer,
            []string{}),
            []string{},
            nil, defaultPolicy),
        match:  false,
        reason: `new service's LoadBalancerSourceRange does not match the current one`,
    },

@ -1472,10 +1492,39 @@ func TestCompareServices(t *testing.T) {
                constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
            },
            v1.ServiceTypeClusterIP,
            []string{"128.141.0.0/16", "137.138.0.0/16"}),
            []string{"128.141.0.0/16", "137.138.0.0/16"},
            nil, defaultPolicy),
        new:   serviceWithOwnerReference,
        match: false,
    },
    {
        about: "new service has a label selector",
        current: newService(
            map[string]string{},
            v1.ServiceTypeClusterIP,
            []string{},
            nil, defaultPolicy),
        new: newService(
            map[string]string{},
            v1.ServiceTypeClusterIP,
            []string{},
            map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy),
        match: false,
    },
    {
        about: "services differ on external traffic policy",
        current: newService(
            map[string]string{},
            v1.ServiceTypeClusterIP,
            []string{},
            nil, defaultPolicy),
        new: newService(
            map[string]string{},
            v1.ServiceTypeClusterIP,
            []string{},
            nil, v1.ServiceExternalTrafficPolicyTypeLocal),
        match: false,
    },
}

for _, tt := range tests {

@ -1680,12 +1729,20 @@ func TestCompareLogicalBackupJob(t *testing.T) {
        }
    }

    match, reason := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob)
    if match != tt.match {
        t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), match, currentCronJob, desiredCronJob)
    } else {
        if !strings.HasPrefix(reason, tt.reason) {
            t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason)
    cmp := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob)
    if cmp.match != tt.match {
        t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), cmp.match, currentCronJob, desiredCronJob)
    } else if !cmp.match {
        found := false
        for _, reason := range cmp.reasons {
            if strings.HasPrefix(reason, tt.reason) {
                found = true
                break
            }
            found = false
        }
        if !found {
            t.Errorf("%s - expected reason prefix %s, not found in %#v", t.Name(), tt.reason, cmp.reasons)
        }
    }
})

@ -2057,3 +2114,91 @@ func TestCompareVolumeMounts(t *testing.T) {
        })
    }
}

func TestGetSwitchoverSchedule(t *testing.T) {
    now := time.Now()

    futureTimeStart := now.Add(1 * time.Hour)
    futureWindowTimeStart := futureTimeStart.Format("15:04")
    futureWindowTimeEnd := now.Add(2 * time.Hour).Format("15:04")
    pastTimeStart := now.Add(-2 * time.Hour)
    pastWindowTimeStart := pastTimeStart.Format("15:04")
    pastWindowTimeEnd := now.Add(-1 * time.Hour).Format("15:04")

    tests := []struct {
        name     string
        windows  []acidv1.MaintenanceWindow
        expected string
    }{
        {
            name: "everyday maintenance windows is later today",
            windows: []acidv1.MaintenanceWindow{
                {
                    Everyday:  true,
                    StartTime: mustParseTime(futureWindowTimeStart),
                    EndTime:   mustParseTime(futureWindowTimeEnd),
                },
            },
            expected: futureTimeStart.Format("2006-01-02T15:04+00"),
        },
        {
            name: "everyday maintenance window is tomorrow",
            windows: []acidv1.MaintenanceWindow{
                {
                    Everyday:  true,
                    StartTime: mustParseTime(pastWindowTimeStart),
                    EndTime:   mustParseTime(pastWindowTimeEnd),
                },
            },
            expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
        },
        {
            name: "weekday maintenance windows is later today",
            windows: []acidv1.MaintenanceWindow{
                {
                    Weekday:   now.Weekday(),
                    StartTime: mustParseTime(futureWindowTimeStart),
                    EndTime:   mustParseTime(futureWindowTimeEnd),
                },
            },
            expected: futureTimeStart.Format("2006-01-02T15:04+00"),
        },
        {
            name: "weekday maintenance windows is passed for today",
            windows: []acidv1.MaintenanceWindow{
                {
                    Weekday:   now.Weekday(),
                    StartTime: mustParseTime(pastWindowTimeStart),
                    EndTime:   mustParseTime(pastWindowTimeEnd),
                },
            },
            expected: pastTimeStart.AddDate(0, 0, 7).Format("2006-01-02T15:04+00"),
        },
        {
            name: "choose the earliest window",
            windows: []acidv1.MaintenanceWindow{
                {
                    Weekday:   now.AddDate(0, 0, 2).Weekday(),
                    StartTime: mustParseTime(futureWindowTimeStart),
                    EndTime:   mustParseTime(futureWindowTimeEnd),
                },
                {
                    Everyday:  true,
                    StartTime: mustParseTime(pastWindowTimeStart),
                    EndTime:   mustParseTime(pastWindowTimeEnd),
                },
            },
            expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            cluster.Spec.MaintenanceWindows = tt.windows
            schedule := cluster.GetSwitchoverSchedule()
            if schedule != tt.expected {
                t.Errorf("Expected GetSwitchoverSchedule to return %s, returned: %s", tt.expected, schedule)
            }
        })
    }
}

@ -2,6 +2,7 @@ package cluster

import (
    "context"
    "encoding/json"
    "fmt"
    "reflect"
    "strings"

@ -977,6 +978,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
        err error
    )

    updatedPodAnnotations := map[string]*string{}
    syncReason := make([]string, 0)
    deployment, err = c.KubeClient.
        Deployments(c.Namespace).

@ -1038,9 +1040,27 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
    }

    newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec))
    if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations); changed {
    deletedPodAnnotations := []string{}
    if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations, &deletedPodAnnotations); changed {
        specSync = true
        syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...)

        for _, anno := range deletedPodAnnotations {
            updatedPodAnnotations[anno] = nil
        }
        templateMetadataReq := map[string]map[string]map[string]map[string]map[string]*string{
            "spec": {"template": {"metadata": {"annotations": updatedPodAnnotations}}}}
        patch, err := json.Marshal(templateMetadataReq)
        if err != nil {
            return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pod template: %v", role, err)
        }
        deployment, err = c.KubeClient.Deployments(c.Namespace).Patch(context.TODO(),
            deployment.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
        if err != nil {
            c.logger.Errorf("failed to patch %s connection pooler's pod template: %v", role, err)
            return nil, err
        }

        deployment.Spec.Template.Annotations = newPodAnnotations
    }

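Reviewer note: setting a key to nil in a map[string]*string marshals to JSON null, which a strategic merge patch interprets as "delete this key" — that is how the removed pod-template annotations collected above actually get dropped from the deployment. A standalone sketch of the payload shape:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        val := "enabled"
        annotations := map[string]*string{
            "keep-me":   &val, // kept or updated
            "delete-me": nil,  // null => removed by the strategic merge patch
        }
        patch, err := json.Marshal(map[string]any{
            "spec": map[string]any{
                "template": map[string]any{
                    "metadata": map[string]any{"annotations": annotations},
                },
            },
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(string(patch))
        // {"spec":{"template":{"metadata":{"annotations":{"delete-me":null,"keep-me":"enabled"}}}}}
    }
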
@ -1064,7 +1084,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
    }

    newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations
    if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations); changed {
    if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations, nil); changed {
        deployment, err = patchConnectionPoolerAnnotations(c.KubeClient, deployment, newAnnotations)
        if err != nil {
            return nil, err

@ -1098,14 +1118,20 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
        if err != nil {
            return nil, fmt.Errorf("could not delete pooler pod: %v", err)
        }
    } else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed {
        patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations)
        if err != nil {
            return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err)
    } else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations, nil); changed {
        metadataReq := map[string]map[string]map[string]*string{"metadata": {}}

        for anno, val := range deployment.Spec.Template.Annotations {
            updatedPodAnnotations[anno] = &val
        }
        _, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
        metadataReq["metadata"]["annotations"] = updatedPodAnnotations
        patch, err := json.Marshal(metadataReq)
        if err != nil {
            return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err)
            return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pods: %v", role, err)
        }
        _, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
        if err != nil {
            return nil, fmt.Errorf("could not patch annotations for %s connection pooler's pod %q: %v", role, pod.Name, err)
        }
    }
}

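Reviewer note: `updatedPodAnnotations[anno] = &val` stores the address of the range variable. Since Go 1.22 each iteration gets a fresh `val`, so every map entry points at its own copy; on pre-1.22 compilers all entries would have aliased one variable. A sketch of both forms:

    package main

    import "fmt"

    func main() {
        src := map[string]string{"a": "1", "b": "2"}

        out := map[string]*string{}
        for k, v := range src {
            out[k] = &v // Go 1.22+: v is fresh per iteration, so each entry keeps its own value
        }
        for k, p := range out {
            fmt.Println(k, *p) // a 1 and b 2 (map order is random)
        }

        // Equivalent that is also safe on pre-1.22 compilers: shadow the variable.
        out2 := map[string]*string{}
        for k, v := range src {
            v := v
            out2[k] = &v
        }
        fmt.Println(len(out2)) // 2
    }
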
@ -4,7 +4,9 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "maps"
    "path"
    "slices"
    "sort"
    "strings"

@ -12,19 +14,16 @@ import (
    "github.com/sirupsen/logrus"

    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    policyv1 "k8s.io/api/policy/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/intstr"

    "golang.org/x/exp/maps"
    "golang.org/x/exp/slices"
    batchv1 "k8s.io/api/batch/v1"
    "k8s.io/apimachinery/pkg/labels"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    "github.com/zalando/postgres-operator/pkg/spec"
    "github.com/zalando/postgres-operator/pkg/util"

@ -109,10 +108,15 @@ func (c *Cluster) servicePort(role PostgresRole) int32 {
    return pgPort
}

func (c *Cluster) podDisruptionBudgetName() string {
func (c *Cluster) PrimaryPodDisruptionBudgetName() string {
    return c.OpConfig.PDBNameFormat.Format("cluster", c.Name)
}

func (c *Cluster) criticalOpPodDisruptionBudgetName() string {
    pdbTemplate := config.StringTemplate("postgres-{cluster}-critical-op-pdb")
    return pdbTemplate.Format("cluster", c.Name)
}

func makeDefaultResources(config *config.Config) acidv1.Resources {

    defaultRequests := acidv1.ResourceDescription{

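Reviewer note: the primary PDB keeps its operator-configurable name via PDBNameFormat, while the critical-op PDB name is hard-coded from a template. A rough sketch of what that placeholder expansion amounts to — this mimics config.StringTemplate.Format under the assumption that it substitutes {key}/value pairs, and is not the repo's actual implementation:

    package main

    import (
        "fmt"
        "strings"
    )

    // format replaces each {key} in the template with its paired value.
    func format(template string, pairs ...string) string {
        repl := make([]string, 0, len(pairs))
        for i := 0; i+1 < len(pairs); i += 2 {
            repl = append(repl, "{"+pairs[i]+"}", pairs[i+1])
        }
        return strings.NewReplacer(repl...).Replace(template)
    }

    func main() {
        fmt.Println(format("postgres-{cluster}-pdb", "cluster", "acid-minimal"))
        // postgres-acid-minimal-pdb
        fmt.Println(format("postgres-{cluster}-critical-op-pdb", "cluster", "acid-minimal"))
        // postgres-acid-minimal-critical-op-pdb
    }
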
||||
defaultRequests := acidv1.ResourceDescription{
|
||||
|
|
@ -166,7 +170,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
|
|||
if isSmaller {
|
||||
msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased",
|
||||
cpuLimit.String(), constants.PostgresContainerName, minCPULimit)
|
||||
c.logger.Warningf(msg)
|
||||
c.logger.Warningf("%s", msg)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
|
||||
resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit)
|
||||
}
|
||||
|
|
@ -183,7 +187,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
|
|||
if isSmaller {
|
||||
msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased",
|
||||
memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit)
|
||||
c.logger.Warningf(msg)
|
||||
c.logger.Warningf("%s", msg)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
|
||||
resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit)
|
||||
}
|
||||
|
|
@ -519,13 +523,14 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinit
|
|||
},
|
||||
}
|
||||
} else {
|
||||
if c.OpConfig.NodeReadinessLabelMerge == "OR" {
|
||||
switch c.OpConfig.NodeReadinessLabelMerge {
|
||||
case "OR":
|
||||
manifestTerms := nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
manifestTerms = append(manifestTerms, nodeReadinessSelectorTerm)
|
||||
nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
|
||||
NodeSelectorTerms: manifestTerms,
|
||||
}
|
||||
} else if c.OpConfig.NodeReadinessLabelMerge == "AND" {
|
||||
case "AND":
|
||||
for i, nodeSelectorTerm := range nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
|
||||
manifestExpressions := nodeSelectorTerm.MatchExpressions
|
||||
manifestExpressions = append(manifestExpressions, matchExpressions...)
|
||||
|
|
@ -1005,6 +1010,9 @@ func (c *Cluster) generateSpiloPodEnvVars(

    if c.patroniUsesKubernetes() {
        envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
        if c.OpConfig.EnablePodDisruptionBudget != nil && *c.OpConfig.EnablePodDisruptionBudget {
            envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_BOOTSTRAP_LABELS", Value: "{\"critical-operation\":\"true\"}"})
        }
    } else {
        envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
    }

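Reviewer note: the escaped env value is just a one-entry JSON label map; judging by the variable's name, Patroni attaches these labels to pods while bootstrap-critical operations run, which is what the critical-op PDB's critical-operation=true selector introduced in this changeset matches on. Verifying the exact payload:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        labels := map[string]string{"critical-operation": "true"}
        b, err := json.Marshal(labels)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"critical-operation":"true"}
    }
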
@ -1290,7 +1298,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
        return nil, fmt.Errorf("could not generate resource requirements: %v", err)
    }

    if spec.InitContainers != nil && len(spec.InitContainers) > 0 {
    if len(spec.InitContainers) > 0 {
        if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
            c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
        }

@ -1393,7 +1401,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef

    // generate container specs for sidecars specified in the cluster manifest
    clusterSpecificSidecars := []v1.Container{}
    if spec.Sidecars != nil && len(spec.Sidecars) > 0 {
    if len(spec.Sidecars) > 0 {
        // warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars)
        if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
            c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")

@ -1497,11 +1505,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
    updateStrategy := appsv1.StatefulSetUpdateStrategy{Type: appsv1.OnDeleteStatefulSetStrategyType}

    var podManagementPolicy appsv1.PodManagementPolicyType
    if c.OpConfig.PodManagementPolicy == "ordered_ready" {
    switch c.OpConfig.PodManagementPolicy {
    case "ordered_ready":
        podManagementPolicy = appsv1.OrderedReadyPodManagement
    } else if c.OpConfig.PodManagementPolicy == "parallel" {
    case "parallel":
        podManagementPolicy = appsv1.ParallelPodManagement
    } else {
    default:
        return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
    }

|
@ -1920,7 +1929,7 @@ func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret {
|
|||
|
||||
// if secret lives in another namespace we cannot set ownerReferences
|
||||
var ownerReferences []metav1.OwnerReference
|
||||
if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") {
|
||||
if c.Config.OpConfig.EnableCrossNamespaceSecret && c.Postgresql.ObjectMeta.Namespace != pgUser.Namespace {
|
||||
ownerReferences = nil
|
||||
} else {
|
||||
ownerReferences = c.ownerReferences()
|
||||
|
|
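Reviewer note: comparing namespaces directly is more robust than inferring "cross-namespace" from a dot in the username, because a dotted name can still resolve to the cluster's own namespace. A toy illustration of the two checks disagreeing (values are made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        clusterNamespace := "default"
        username := "default.robot" // dotted, yet the secret targets the cluster's own namespace
        userNamespace := "default"

        byName := strings.Contains(username, ".")        // old heuristic: true
        byNamespace := clusterNamespace != userNamespace // new check: false
        fmt.Println(byName, byNamespace)                 // true false -> old check would wrongly skip owner references
    }
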
@ -2207,7 +2216,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
    return result
}

func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
func (c *Cluster) generatePrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget {
    minAvailable := intstr.FromInt(1)
    pdbEnabled := c.OpConfig.EnablePodDisruptionBudget
    pdbMasterLabelSelector := c.OpConfig.PDBMasterLabelSelector

@ -2225,7 +2234,36 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {

    return &policyv1.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:            c.podDisruptionBudgetName(),
            Name:            c.PrimaryPodDisruptionBudgetName(),
            Namespace:       c.Namespace,
            Labels:          c.labelsSet(true),
            Annotations:     c.annotationsSet(nil),
            OwnerReferences: c.ownerReferences(),
        },
        Spec: policyv1.PodDisruptionBudgetSpec{
            MinAvailable: &minAvailable,
            Selector: &metav1.LabelSelector{
                MatchLabels: labels,
            },
        },
    }
}

func (c *Cluster) generateCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget {
    minAvailable := intstr.FromInt32(c.Spec.NumberOfInstances)
    pdbEnabled := c.OpConfig.EnablePodDisruptionBudget

    // if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0.
    if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 {
        minAvailable = intstr.FromInt(0)
    }

    labels := c.labelsSet(false)
    labels["critical-operation"] = "true"

    return &policyv1.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:        c.criticalOpPodDisruptionBudgetName(),
            Namespace:   c.Namespace,
            Labels:      c.labelsSet(true),
            Annotations: c.annotationsSet(nil),

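Reviewer note: with minAvailable equal to the full instance count, the critical-op budget blocks every voluntary eviction of pods labeled critical-operation=true, rather than protecting only the master the way the primary budget does. A trimmed sketch of the kind of object this produces — assuming three instances and example names; it uses only k8s.io/api and k8s.io/apimachinery types, no operator internals:

    package main

    import (
        "fmt"

        policyv1 "k8s.io/api/policy/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        minAvailable := intstr.FromInt32(3) // == numberOfInstances: no pod may be evicted
        pdb := policyv1.PodDisruptionBudget{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "postgres-acid-minimal-critical-op-pdb",
                Namespace: "default",
            },
            Spec: policyv1.PodDisruptionBudgetSpec{
                MinAvailable: &minAvailable,
                Selector: &metav1.LabelSelector{
                    MatchLabels: map[string]string{
                        "cluster-name":       "acid-minimal",
                        "critical-operation": "true",
                    },
                },
            },
        }
        fmt.Printf("%s selects %v with minAvailable=%s\n",
            pdb.Name, pdb.Spec.Selector.MatchLabels, pdb.Spec.MinAvailable.String())
    }
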
@ -2349,22 +2349,34 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
    }
}

testLabelsAndSelectors := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
    masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector
    if podDisruptionBudget.ObjectMeta.Namespace != "myapp" {
        return fmt.Errorf("Object Namespace incorrect.")
    }
    if !reflect.DeepEqual(podDisruptionBudget.Labels, map[string]string{"team": "myapp", "cluster-name": "myapp-database"}) {
        return fmt.Errorf("Labels incorrect.")
    }
    if !masterLabelSelectorDisabled &&
        !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{
            MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) {
testLabelsAndSelectors := func(isPrimary bool) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
    return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
        masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector
        if podDisruptionBudget.ObjectMeta.Namespace != "myapp" {
            return fmt.Errorf("Object Namespace incorrect.")
        }
        expectedLabels := map[string]string{"team": "myapp", "cluster-name": "myapp-database"}
        if !reflect.DeepEqual(podDisruptionBudget.Labels, expectedLabels) {
            return fmt.Errorf("Labels incorrect, got %#v, expected %#v", podDisruptionBudget.Labels, expectedLabels)
        }
        if !masterLabelSelectorDisabled {
            if isPrimary {
                expectedLabels := &metav1.LabelSelector{
                    MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}
                if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) {
                    return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels)
                }
            } else {
                expectedLabels := &metav1.LabelSelector{
                    MatchLabels: map[string]string{"cluster-name": "myapp-database", "critical-operation": "true"}}
                if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) {
                    return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels)
                }
            }
        }

        return fmt.Errorf("MatchLabels incorrect.")
        return nil
    }

    return nil
}

testPodDisruptionBudgetOwnerReference := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {


@@ -2400,7 +2412,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-pdb"),
 				hasMinAvailable(1),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 		{

@@ -2417,7 +2429,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-pdb"),
 				hasMinAvailable(0),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 		{

@@ -2434,7 +2446,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-pdb"),
 				hasMinAvailable(0),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 		{

@@ -2451,7 +2463,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-databass-budget"),
 				hasMinAvailable(1),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 		{

@@ -2468,7 +2480,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-pdb"),
 				hasMinAvailable(1),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 		{

@@ -2485,13 +2497,99 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
 				testPodDisruptionBudgetOwnerReference,
 				hasName("postgres-myapp-database-pdb"),
 				hasMinAvailable(1),
-				testLabelsAndSelectors,
+				testLabelsAndSelectors(true),
 			},
 		},
 	}

 	for _, tt := range tests {
-		result := tt.spec.generatePodDisruptionBudget()
+		result := tt.spec.generatePrimaryPodDisruptionBudget()
 		for _, check := range tt.check {
 			err := check(tt.spec, result)
 			if err != nil {
 				t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v",
 					testName, tt.scenario, err)
 			}
 		}
 	}

+	testCriticalOp := []struct {
+		scenario string
+		spec     *Cluster
+		check    []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error
+	}{
+		{
+			scenario: "With multiple instances",
+			spec: New(
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
+				k8sutil.KubernetesClient{},
+				acidv1.Postgresql{
+					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
+					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
+				logger,
+				eventRecorder),
+			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
+				testPodDisruptionBudgetOwnerReference,
+				hasName("postgres-myapp-database-critical-op-pdb"),
+				hasMinAvailable(3),
+				testLabelsAndSelectors(false),
+			},
+		},
+		{
+			scenario: "With zero instances",
+			spec: New(
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
+				k8sutil.KubernetesClient{},
+				acidv1.Postgresql{
+					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
+					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}},
+				logger,
+				eventRecorder),
+			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
+				testPodDisruptionBudgetOwnerReference,
+				hasName("postgres-myapp-database-critical-op-pdb"),
+				hasMinAvailable(0),
+				testLabelsAndSelectors(false),
+			},
+		},
+		{
+			scenario: "With PodDisruptionBudget disabled",
+			spec: New(
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
+				k8sutil.KubernetesClient{},
+				acidv1.Postgresql{
+					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
+					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
+				logger,
+				eventRecorder),
+			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
+				testPodDisruptionBudgetOwnerReference,
+				hasName("postgres-myapp-database-critical-op-pdb"),
+				hasMinAvailable(0),
+				testLabelsAndSelectors(false),
+			},
+		},
+		{
+			scenario: "With OwnerReference enabled",
+			spec: New(
+				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
+				k8sutil.KubernetesClient{},
+				acidv1.Postgresql{
+					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
+					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
+				logger,
+				eventRecorder),
+			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
+				testPodDisruptionBudgetOwnerReference,
+				hasName("postgres-myapp-database-critical-op-pdb"),
+				hasMinAvailable(3),
+				testLabelsAndSelectors(false),
+			},
+		},
+	}
+
+	for _, tt := range testCriticalOp {
+		result := tt.spec.generateCriticalOpPodDisruptionBudget()
+		for _, check := range tt.check {
+			err := check(tt.spec, result)
+			if err != nil {
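
The test above expects two budgets per cluster: the primary PDB selects pods labeled spilo-role=master, while the critical-op PDB pins minAvailable to the full instance count over pods labeled critical-operation=true, effectively freezing evictions for the whole cluster while a critical operation runs. A hedged sketch of roughly the object the critical-op generator would have to produce to satisfy those checks (values taken from the test fixtures, not from the generator itself; assumes the usual policyv1, metav1 and intstr imports from k8s.io):

    // illustrative only: a PDB shaped like the one the checks above expect
    criticalOpPDB := policyv1.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "postgres-myapp-database-critical-op-pdb",
            Namespace: "myapp",
            Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"},
        },
        Spec: policyv1.PodDisruptionBudgetSpec{
            MinAvailable: &intstr.IntOrString{IntVal: 3}, // NumberOfInstances
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"cluster-name": "myapp-database", "critical-operation": "true"},
            },
        },
    }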

@@ -16,7 +16,6 @@ import (

 // VersionMap Map of version numbers
 var VersionMap = map[string]int{
-	"12": 120000,
 	"13": 130000,
 	"14": 140000,
 	"15": 150000,

@@ -106,6 +105,22 @@ func (c *Cluster) removeFailuresAnnotation() error {
 	return nil
 }

+func (c *Cluster) criticalOperationLabel(pods []v1.Pod, value *string) error {
+	metadataReq := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": value}}}
+
+	patchReq, err := json.Marshal(metadataReq)
+	if err != nil {
+		return fmt.Errorf("could not marshal ObjectMeta: %v", err)
+	}
+	for _, pod := range pods {
+		_, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patchReq, metav1.PatchOptions{})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 /*
 Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off").
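
Passing a nil value through the same helper is what clears the label again: in a strategic merge patch a null value removes the key. A standalone sketch of the payloads the helper marshals (standard library only):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        val := "true"
        // setting the label
        set := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": &val}}}
        b, _ := json.Marshal(set)
        fmt.Println(string(b)) // {"metadata":{"labels":{"critical-operation":"true"}}}

        // clearing the label: nil serializes to null, which a strategic
        // merge patch interprets as "remove this key"
        unset := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": nil}}}
        b, _ = json.Marshal(unset)
        fmt.Println(string(b)) // {"metadata":{"labels":{"critical-operation":null}}}
    }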

@@ -129,17 +144,13 @@ func (c *Cluster) majorVersionUpgrade() error {
 		return nil
 	}

-	if !isInMainternanceWindow(c.Spec.MaintenanceWindows) {
-		c.logger.Infof("skipping major version upgrade, not in maintenance window")
-		return nil
-	}
-
 	pods, err := c.listPods()
 	if err != nil {
 		return err
 	}

 	allRunning := true
+	isStandbyCluster := false

 	var masterPod *v1.Pod

@@ -147,8 +158,9 @@ func (c *Cluster) majorVersionUpgrade() error {
 		ps, _ := c.patroni.GetMemberData(&pod)

 		if ps.Role == "standby_leader" {
-			c.logger.Errorf("skipping major version upgrade for %s/%s standby cluster. Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name)
-			return nil
+			isStandbyCluster = true
+			c.currentMajorVersion = ps.ServerVersion
+			break
 		}

 		if ps.State != "running" {

@@ -175,6 +187,9 @@ func (c *Cluster) majorVersionUpgrade() error {
 		}
 		c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
 		return nil
+	} else if isStandbyCluster {
+		c.logger.Warnf("skipping major version upgrade for %s/%s standby cluster. Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name)
+		return nil
 	}

 	if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists {

@@ -182,6 +197,11 @@ func (c *Cluster) majorVersionUpgrade() error {
 		return nil
 	}

+	if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
+		c.logger.Infof("skipping major version upgrade, not in maintenance window")
+		return nil
+	}
+
 	members, err := c.patroni.GetClusterMembers(masterPod)
 	if err != nil {
 		c.logger.Error("could not get cluster members data from Patroni API, skipping major version upgrade")

@@ -216,9 +236,20 @@ func (c *Cluster) majorVersionUpgrade() error {

 	isUpgradeSuccess := true
 	numberOfPods := len(pods)
-	if allRunning && masterPod != nil {
+	if allRunning {
 		c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
 		if c.currentMajorVersion < desiredVersion {
+			defer func() error {
+				if err = c.criticalOperationLabel(pods, nil); err != nil {
+					return fmt.Errorf("failed to remove critical-operation label: %s", err)
+				}
+				return nil
+			}()
+			val := "true"
+			if err = c.criticalOperationLabel(pods, &val); err != nil {
+				return fmt.Errorf("failed to assign critical-operation label: %s", err)
+			}
+
 			podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}
 			c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
 			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)

@@ -245,7 +276,7 @@ func (c *Cluster) majorVersionUpgrade() error {
 			isUpgradeSuccess = false
 			c.annotatePostgresResource(isUpgradeSuccess)
 			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg)
-			return fmt.Errorf(scriptErrMsg)
+			return fmt.Errorf("%s", scriptErrMsg)
 		}

 	c.annotatePostgresResource(isUpgradeSuccess)

@@ -3,12 +3,11 @@ package cluster

 import (
 	"context"
 	"fmt"
+	"slices"
 	"sort"
 	"strconv"
 	"time"

-	"golang.org/x/exp/slices"
-
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -280,11 +279,16 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
 		return fmt.Errorf("could not move pod: %v", err)
 	}

+	scheduleSwitchover := false
+	if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
+		c.logger.Infof("postponing switchover, not in maintenance window")
+		scheduleSwitchover = true
+	}
 	err = retryutil.Retry(1*time.Minute, 5*time.Minute,
 		func() (bool, error) {
-			err := c.Switchover(oldMaster, masterCandidateName)
+			err := c.Switchover(oldMaster, masterCandidateName, scheduleSwitchover)
 			if err != nil {
-				c.logger.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
+				c.logger.Errorf("could not switchover to pod %q: %v", masterCandidateName, err)
 				return false, nil
 			}
 			return true, nil
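
The new boolean turns an immediate switchover into a scheduled one when the cluster is outside its maintenance window. Patroni's REST API supports this directly: its POST /switchover endpoint takes an optional scheduled_at timestamp. A hedged sketch of such a request body (field names per Patroni's documented API; pod names and timestamp are hypothetical):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // switchoverRequest mirrors the JSON body Patroni's POST /switchover
    // endpoint accepts; scheduled_at is optional and, when present, defers
    // the switchover to the given time instead of running it immediately.
    type switchoverRequest struct {
        Leader      string `json:"leader"`
        Candidate   string `json:"candidate,omitempty"`
        ScheduledAt string `json:"scheduled_at,omitempty"`
    }

    func main() {
        req := switchoverRequest{
            Leader:      "myapp-database-0", // hypothetical pod names
            Candidate:   "myapp-database-1",
            ScheduledAt: "2025-06-01T02:00:00+00:00",
        }
        b, _ := json.Marshal(req)
        fmt.Println(string(b))
    }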

@@ -428,9 +432,10 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
 		}

 		newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
-		if newRole == Replica {
+		switch newRole {
+		case Replica:
 			replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
-		} else if newRole == Master {
+		case Master:
 			newMasterPod = newPod
 		}
 	}

@@ -445,7 +450,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
 			// do not recreate master now so it will keep the update flag and switchover will be retried on next sync
 			return fmt.Errorf("skipping switchover: %v", err)
 		}
-		if err := c.Switchover(masterPod, masterCandidate); err != nil {
+		if err := c.Switchover(masterPod, masterCandidate, false); err != nil {
 			return fmt.Errorf("could not perform switch over: %v", err)
 		}
 	} else if newMasterPod == nil && len(replicas) == 0 {

@@ -23,8 +23,13 @@ const (
 )

 func (c *Cluster) listResources() error {
-	if c.PodDisruptionBudget != nil {
-		c.logger.Infof("found pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta), c.PodDisruptionBudget.UID)
+	if c.PrimaryPodDisruptionBudget != nil {
+		c.logger.Infof("found primary pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta), c.PrimaryPodDisruptionBudget.UID)
+	}
+
+	if c.CriticalOpPodDisruptionBudget != nil {
+		c.logger.Infof("found pod disruption budget for critical operations: %q (uid: %q)", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta), c.CriticalOpPodDisruptionBudget.UID)
 	}

 	if c.Statefulset != nil {

@@ -89,12 +94,12 @@ func (c *Cluster) listResources() error {
 func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
 	c.setProcessName("creating statefulset")
 	// check if it's allowed that spec contains initContainers
-	if c.Spec.InitContainers != nil && len(c.Spec.InitContainers) > 0 &&
+	if len(c.Spec.InitContainers) > 0 &&
 		c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
 		return nil, fmt.Errorf("initContainers specified but disabled in configuration")
 	}
 	// check if it's allowed that spec contains sidecars
-	if c.Spec.Sidecars != nil && len(c.Spec.Sidecars) > 0 &&
+	if len(c.Spec.Sidecars) > 0 &&
 		c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
 		return nil, fmt.Errorf("sidecar containers specified but disabled in configuration")
 	}

@@ -162,8 +167,8 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
 		return fmt.Errorf("pod %q does not belong to cluster", podName)
 	}

-	if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name); err != nil {
-		return fmt.Errorf("could not failover: %v", err)
+	if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name, ""); err != nil {
+		return fmt.Errorf("could not switchover: %v", err)
 	}

 	return nil

@@ -329,7 +334,7 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
 		}
 	}

-	if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations); changed {
+	if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations, nil); changed {
 		patchData, err := metaAnnotationsPatch(newService.Annotations)
 		if err != nil {
 			return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err)

@@ -417,59 +422,128 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
 	return result
 }

-func (c *Cluster) createPodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) {
-	podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
+func (c *Cluster) createPrimaryPodDisruptionBudget() error {
+	c.logger.Debug("creating primary pod disruption budget")
+	if c.PrimaryPodDisruptionBudget != nil {
+		c.logger.Warning("primary pod disruption budget already exists in the cluster")
+		return nil
+	}
+
+	podDisruptionBudgetSpec := c.generatePrimaryPodDisruptionBudget()
 	podDisruptionBudget, err := c.KubeClient.
 		PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
 		Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{})

 	if err != nil {
-		return nil, err
+		return err
 	}
-	c.PodDisruptionBudget = podDisruptionBudget
+	c.logger.Infof("primary pod disruption budget %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta))
+	c.PrimaryPodDisruptionBudget = podDisruptionBudget

-	return podDisruptionBudget, nil
+	return nil
 }

-func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
-	if c.PodDisruptionBudget == nil {
-		return fmt.Errorf("there is no pod disruption budget in the cluster")
+func (c *Cluster) createCriticalOpPodDisruptionBudget() error {
+	c.logger.Debug("creating pod disruption budget for critical operations")
+	if c.CriticalOpPodDisruptionBudget != nil {
+		c.logger.Warning("pod disruption budget for critical operations already exists in the cluster")
+		return nil
 	}

-	if err := c.deletePodDisruptionBudget(); err != nil {
-		return fmt.Errorf("could not delete pod disruption budget: %v", err)
+	podDisruptionBudgetSpec := c.generateCriticalOpPodDisruptionBudget()
+	podDisruptionBudget, err := c.KubeClient.
+		PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
+		Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{})
+
+	if err != nil {
+		return err
+	}
+	c.logger.Infof("pod disruption budget for critical operations %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta))
+	c.CriticalOpPodDisruptionBudget = podDisruptionBudget
+
+	return nil
+}
+
+func (c *Cluster) createPodDisruptionBudgets() error {
+	errors := make([]string, 0)
+
+	err := c.createPrimaryPodDisruptionBudget()
+	if err != nil {
+		errors = append(errors, fmt.Sprintf("could not create primary pod disruption budget: %v", err))
+	}
+
+	err = c.createCriticalOpPodDisruptionBudget()
+	if err != nil {
+		errors = append(errors, fmt.Sprintf("could not create pod disruption budget for critical operations: %v", err))
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+	return nil
+}
+
+func (c *Cluster) updatePrimaryPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
+	c.logger.Debug("updating primary pod disruption budget")
+	if c.PrimaryPodDisruptionBudget == nil {
+		return fmt.Errorf("there is no primary pod disruption budget in the cluster")
+	}
+
+	if err := c.deletePrimaryPodDisruptionBudget(); err != nil {
+		return fmt.Errorf("could not delete primary pod disruption budget: %v", err)
 	}

 	newPdb, err := c.KubeClient.
 		PodDisruptionBudgets(pdb.Namespace).
 		Create(context.TODO(), pdb, metav1.CreateOptions{})
 	if err != nil {
-		return fmt.Errorf("could not create pod disruption budget: %v", err)
+		return fmt.Errorf("could not create primary pod disruption budget: %v", err)
 	}
-	c.PodDisruptionBudget = newPdb
+	c.PrimaryPodDisruptionBudget = newPdb

 	return nil
 }

-func (c *Cluster) deletePodDisruptionBudget() error {
-	c.logger.Debug("deleting pod disruption budget")
-	if c.PodDisruptionBudget == nil {
-		c.logger.Debug("there is no pod disruption budget in the cluster")
+func (c *Cluster) updateCriticalOpPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
+	c.logger.Debug("updating pod disruption budget for critical operations")
+	if c.CriticalOpPodDisruptionBudget == nil {
+		return fmt.Errorf("there is no pod disruption budget for critical operations in the cluster")
+	}
+
+	if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil {
+		return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
+	}
+
+	newPdb, err := c.KubeClient.
+		PodDisruptionBudgets(pdb.Namespace).
+		Create(context.TODO(), pdb, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err)
+	}
+	c.CriticalOpPodDisruptionBudget = newPdb
+
+	return nil
+}
+
+func (c *Cluster) deletePrimaryPodDisruptionBudget() error {
+	c.logger.Debug("deleting primary pod disruption budget")
+	if c.PrimaryPodDisruptionBudget == nil {
+		c.logger.Debug("there is no primary pod disruption budget in the cluster")
 		return nil
 	}

-	pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)
+	pdbName := util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)
 	err := c.KubeClient.
-		PodDisruptionBudgets(c.PodDisruptionBudget.Namespace).
-		Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions)
+		PodDisruptionBudgets(c.PrimaryPodDisruptionBudget.Namespace).
+		Delete(context.TODO(), c.PrimaryPodDisruptionBudget.Name, c.deleteOptions)
 	if k8sutil.ResourceNotFound(err) {
-		c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
+		c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta))
 	} else if err != nil {
-		return fmt.Errorf("could not delete PodDisruptionBudget: %v", err)
+		return fmt.Errorf("could not delete primary pod disruption budget: %v", err)
 	}

-	c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
-	c.PodDisruptionBudget = nil
+	c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta))
+	c.PrimaryPodDisruptionBudget = nil

 	err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
 		func() (bool, error) {
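
The create/update/delete pairs aggregate failures by joining strings with `', '`. Since Go 1.20 the standard library's errors.Join covers the same need while keeping the individual errors inspectable; a hedged alternative sketch, not what the operator actually uses (the two functions are stand-ins):

    package main

    import (
        "errors"
        "fmt"
    )

    func createPrimaryPDB() error    { return errors.New("primary PDB failed") } // stand-in
    func createCriticalOpPDB() error { return nil }                              // stand-in

    func main() {
        // errors.Join drops nil members and the result still works
        // with errors.Is/errors.As, unlike a concatenated string
        if err := errors.Join(createPrimaryPDB(), createCriticalOpPDB()); err != nil {
            fmt.Println(err)
        }
    }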

@@ -483,12 +557,67 @@ func (c *Cluster) deletePodDisruptionBudget() error {
 			return false, err2
 		})
 	if err != nil {
-		return fmt.Errorf("could not delete pod disruption budget: %v", err)
+		return fmt.Errorf("could not delete primary pod disruption budget: %v", err)
 	}

 	return nil
 }

+func (c *Cluster) deleteCriticalOpPodDisruptionBudget() error {
+	c.logger.Debug("deleting pod disruption budget for critical operations")
+	if c.CriticalOpPodDisruptionBudget == nil {
+		c.logger.Debug("there is no pod disruption budget for critical operations in the cluster")
+		return nil
+	}
+
+	pdbName := util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta)
+	err := c.KubeClient.
+		PodDisruptionBudgets(c.CriticalOpPodDisruptionBudget.Namespace).
+		Delete(context.TODO(), c.CriticalOpPodDisruptionBudget.Name, c.deleteOptions)
+	if k8sutil.ResourceNotFound(err) {
+		c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
+	} else if err != nil {
+		return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
+	}
+
+	c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
+	c.CriticalOpPodDisruptionBudget = nil
+
+	err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
+		func() (bool, error) {
+			_, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{})
+			if err2 == nil {
+				return false, nil
+			}
+			if k8sutil.ResourceNotFound(err2) {
+				return true, nil
+			}
+			return false, err2
+		})
+	if err != nil {
+		return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
+	}
+
+	return nil
+}
+
+func (c *Cluster) deletePodDisruptionBudgets() error {
+	errors := make([]string, 0)
+
+	if err := c.deletePrimaryPodDisruptionBudget(); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+	return nil
+}
+
 func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 	c.setProcessName("deleting endpoint")
 	c.logger.Debugf("deleting %s endpoint", role)

@@ -705,7 +834,12 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet {
 	return c.Statefulset
 }

-// GetPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget
-func (c *Cluster) GetPodDisruptionBudget() *policyv1.PodDisruptionBudget {
-	return c.PodDisruptionBudget
+// GetPrimaryPodDisruptionBudget returns cluster's primary kubernetes PodDisruptionBudget
+func (c *Cluster) GetPrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget {
+	return c.PrimaryPodDisruptionBudget
+}
+
+// GetCriticalOpPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget for critical operations
+func (c *Cluster) GetCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget {
+	return c.CriticalOpPodDisruptionBudget
 }

@@ -114,10 +114,10 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
 	}

 	for slotName, slotAndPublication := range databaseSlotsList {
-		tables := slotAndPublication.Publication
-		tableNames := make([]string, len(tables))
+		newTables := slotAndPublication.Publication
+		tableNames := make([]string, len(newTables))
 		i := 0
-		for t := range tables {
+		for t := range newTables {
 			tableName, schemaName := getTableSchema(t)
 			tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName)
 			i++

@@ -126,6 +126,12 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
 		tableList := strings.Join(tableNames, ", ")

 		currentTables, exists := currentPublications[slotName]
+		// if newTables is empty it means that it's definition was removed from streams section
+		// but when slot is defined in manifest we should sync publications, too
+		// by reusing current tables we make sure it is not
+		if len(newTables) == 0 {
+			tableList = currentTables
+		}
 		if !exists {
 			createPublications[slotName] = tableList
 		} else if currentTables != tableList {
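
For context, keeping a publication in step with the desired table list comes down to choosing between CREATE PUBLICATION and ALTER PUBLICATION ... SET TABLE. A hedged sketch of that decision under the same inputs as the loop above (function and variable names are illustrative, not the operator's API):

    package main

    import "fmt"

    // buildPublicationSQL sketches the create-vs-alter choice made during
    // publication sync; an empty return means the publication is in sync.
    func buildPublicationSQL(slotName, tableList, currentTables string, exists bool) string {
        if !exists {
            return fmt.Sprintf("CREATE PUBLICATION %s FOR TABLE %s", slotName, tableList)
        }
        if currentTables != tableList {
            return fmt.Sprintf("ALTER PUBLICATION %s SET TABLE %s", slotName, tableList)
        }
        return ""
    }

    func main() {
        fmt.Println(buildPublicationSQL("slot_a", "public.orders", "", false))
        fmt.Println(buildPublicationSQL("slot_a", "public.orders, public.items", "public.orders", true))
    }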

@@ -350,16 +356,8 @@ func (c *Cluster) syncStreams() error {
 		return nil
 	}

-	databaseSlots := make(map[string]map[string]zalandov1.Slot)
-	slotsToSync := make(map[string]map[string]string)
-	requiredPatroniConfig := c.Spec.Patroni
-
-	if len(requiredPatroniConfig.Slots) > 0 {
-		for slotName, slotConfig := range requiredPatroniConfig.Slots {
-			slotsToSync[slotName] = slotConfig
-		}
-	}
-
+	// create map with every database and empty slot defintion
+	// we need it to detect removal of streams from databases
 	if err := c.initDbConn(); err != nil {
 		return fmt.Errorf("could not init database connection")
 	}

@@ -372,13 +370,28 @@ func (c *Cluster) syncStreams() error {
 	if err != nil {
 		return fmt.Errorf("could not get list of databases: %v", err)
 	}
-	// get database name with empty list of slot, except template0 and template1
+	databaseSlots := make(map[string]map[string]zalandov1.Slot)
 	for dbName := range listDatabases {
 		if dbName != "template0" && dbName != "template1" {
 			databaseSlots[dbName] = map[string]zalandov1.Slot{}
 		}
 	}

+	// need to take explicitly defined slots into account whey syncing Patroni config
+	slotsToSync := make(map[string]map[string]string)
+	requiredPatroniConfig := c.Spec.Patroni
+	if len(requiredPatroniConfig.Slots) > 0 {
+		for slotName, slotConfig := range requiredPatroniConfig.Slots {
+			slotsToSync[slotName] = slotConfig
+			if _, exists := databaseSlots[slotConfig["database"]]; exists {
+				databaseSlots[slotConfig["database"]][slotName] = zalandov1.Slot{
+					Slot:        slotConfig,
+					Publication: make(map[string]acidv1.StreamTable),
+				}
+			}
+		}
+	}
+
 	// get list of required slots and publications, group by database
 	for _, stream := range c.Spec.Streams {
 		if _, exists := databaseSlots[stream.Database]; !exists {

@@ -391,13 +404,13 @@ func (c *Cluster) syncStreams() error {
 			"type":     "logical",
 		}
 		slotName := getSlotName(stream.Database, stream.ApplicationId)
-		if _, exists := databaseSlots[stream.Database][slotName]; !exists {
+		slotAndPublication, exists := databaseSlots[stream.Database][slotName]
+		if !exists {
 			databaseSlots[stream.Database][slotName] = zalandov1.Slot{
 				Slot:        slot,
 				Publication: stream.Tables,
 			}
 		} else {
-			slotAndPublication := databaseSlots[stream.Database][slotName]
 			streamTables := slotAndPublication.Publication
 			for tableName, table := range stream.Tables {
 				if _, exists := streamTables[tableName]; !exists {

@@ -492,16 +505,17 @@ func (c *Cluster) syncStream(appId string) error {
 			continue
 		}
 		streamExists = true
+		c.Streams[appId] = &stream
 		desiredStreams := c.generateFabricEventStream(appId)
 		if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
 			c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId)
 			stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences
 			c.setProcessName("updating event streams with applicationId %s", appId)
-			stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
+			updatedStream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
 			if err != nil {
 				return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err)
 			}
-			c.Streams[appId] = stream
+			c.Streams[appId] = updatedStream
 		}
 		if match, reason := c.compareStreams(&stream, desiredStreams); !match {
 			c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)

@@ -545,7 +559,7 @@ func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.Fab
 	for newKey, newValue := range newEventStreams.Annotations {
 		desiredAnnotations[newKey] = newValue
 	}
-	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed {
+	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations, nil); changed {
 		match = false
 		reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason))
 	}

@@ -4,8 +4,10 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"reflect"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"time"

@@ -15,8 +17,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
-	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"

@@ -97,6 +97,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}

+	if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
+		// do not apply any major version related changes yet
+		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
+	}
+
 	if err = c.syncStatefulSet(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {
 			err = fmt.Errorf("could not sync statefulsets: %v", err)

@@ -112,8 +117,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	}

 	c.logger.Debug("syncing pod disruption budgets")
-	if err = c.syncPodDisruptionBudget(false); err != nil {
-		err = fmt.Errorf("could not sync pod disruption budget: %v", err)
+	if err = c.syncPodDisruptionBudgets(false); err != nil {
+		err = fmt.Errorf("could not sync pod disruption budgets: %v", err)
 		return err
 	}

@@ -148,7 +153,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return fmt.Errorf("could not sync connection pooler: %v", err)
 	}

-	if len(c.Spec.Streams) > 0 {
+	// sync if manifest stream count is different from stream CR count
+	// it can be that they are always different due to grouping of manifest streams
+	// but we would catch missed removals on update
+	if len(c.Spec.Streams) != len(c.Streams) {
 		c.logger.Debug("syncing streams")
 		if err = c.syncStreams(); err != nil {
 			err = fmt.Errorf("could not sync streams: %v", err)

@@ -230,7 +238,7 @@ func (c *Cluster) syncPatroniConfigMap(suffix string) error {
 		maps.Copy(annotations, cm.Annotations)
 		// Patroni can add extra annotations so incl. current annotations in desired annotations
 		desiredAnnotations := c.annotationsSet(cm.Annotations)
-		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed {
 			patchData, err := metaAnnotationsPatch(desiredAnnotations)
 			if err != nil {
 				return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err)

@@ -275,7 +283,7 @@ func (c *Cluster) syncPatroniEndpoint(suffix string) error {
 		maps.Copy(annotations, ep.Annotations)
 		// Patroni can add extra annotations so incl. current annotations in desired annotations
 		desiredAnnotations := c.annotationsSet(ep.Annotations)
-		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed {
 			patchData, err := metaAnnotationsPatch(desiredAnnotations)
 			if err != nil {
 				return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err)

@@ -320,7 +328,7 @@ func (c *Cluster) syncPatroniService() error {
 		maps.Copy(annotations, svc.Annotations)
 		// Patroni can add extra annotations so incl. current annotations in desired annotations
 		desiredAnnotations := c.annotationsSet(svc.Annotations)
-		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed {
 			patchData, err := metaAnnotationsPatch(desiredAnnotations)
 			if err != nil {
 				return fmt.Errorf("could not form patch for %s service: %v", serviceName, err)

@@ -412,7 +420,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 			return fmt.Errorf("could not update %s endpoint: %v", role, err)
 		}
 	} else {
-		if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations); changed {
+		if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations, nil); changed {
 			patchData, err := metaAnnotationsPatch(desiredEp.Annotations)
 			if err != nil {
 				return fmt.Errorf("could not form patch for %s endpoint: %v", role, err)

@@ -447,22 +455,22 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 	return nil
 }

-func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
+func (c *Cluster) syncPrimaryPodDisruptionBudget(isUpdate bool) error {
 	var (
 		pdb *policyv1.PodDisruptionBudget
 		err error
 	)
-	if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
-		c.PodDisruptionBudget = pdb
-		newPDB := c.generatePodDisruptionBudget()
+	if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
+		c.PrimaryPodDisruptionBudget = pdb
+		newPDB := c.generatePrimaryPodDisruptionBudget()
 		match, reason := c.comparePodDisruptionBudget(pdb, newPDB)
 		if !match {
 			c.logPDBChanges(pdb, newPDB, isUpdate, reason)
-			if err = c.updatePodDisruptionBudget(newPDB); err != nil {
+			if err = c.updatePrimaryPodDisruptionBudget(newPDB); err != nil {
 				return err
 			}
 		} else {
-			c.PodDisruptionBudget = pdb
+			c.PrimaryPodDisruptionBudget = pdb
 		}
 		return nil

@@ -471,21 +479,74 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 		return fmt.Errorf("could not get pod disruption budget: %v", err)
 	}
 	// no existing pod disruption budget, create new one
-	c.logger.Infof("could not find the cluster's pod disruption budget")
+	c.logger.Infof("could not find the primary pod disruption budget")

-	if pdb, err = c.createPodDisruptionBudget(); err != nil {
+	if err = c.createPrimaryPodDisruptionBudget(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {
-			return fmt.Errorf("could not create pod disruption budget: %v", err)
+			return fmt.Errorf("could not create primary pod disruption budget: %v", err)
 		}
 		c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
-		if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
+		if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
 			return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
 		}
 	}

-	c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta))
-	c.PodDisruptionBudget = pdb
+	return nil
+}
+
+func (c *Cluster) syncCriticalOpPodDisruptionBudget(isUpdate bool) error {
+	var (
+		pdb *policyv1.PodDisruptionBudget
+		err error
+	)
+	if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
+		c.CriticalOpPodDisruptionBudget = pdb
+		newPDB := c.generateCriticalOpPodDisruptionBudget()
+		match, reason := c.comparePodDisruptionBudget(pdb, newPDB)
+		if !match {
+			c.logPDBChanges(pdb, newPDB, isUpdate, reason)
+			if err = c.updateCriticalOpPodDisruptionBudget(newPDB); err != nil {
+				return err
+			}
+		} else {
+			c.CriticalOpPodDisruptionBudget = pdb
+		}
+		return nil
+
+	}
+	if !k8sutil.ResourceNotFound(err) {
+		return fmt.Errorf("could not get pod disruption budget: %v", err)
+	}
+	// no existing pod disruption budget, create new one
+	c.logger.Infof("could not find pod disruption budget for critical operations")
+
+	if err = c.createCriticalOpPodDisruptionBudget(); err != nil {
+		if !k8sutil.ResourceAlreadyExists(err) {
+			return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err)
+		}
+		c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
+		if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
+			return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
+		}
+	}
+
+	return nil
+}
+
+func (c *Cluster) syncPodDisruptionBudgets(isUpdate bool) error {
+	errors := make([]string, 0)
+
+	if err := c.syncPrimaryPodDisruptionBudget(isUpdate); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if err := c.syncCriticalOpPodDisruptionBudget(isUpdate); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+	return nil
+}

@@ -497,6 +558,7 @@ func (c *Cluster) syncStatefulSet() error {
 	)
 	podsToRecreate := make([]v1.Pod, 0)
 	isSafeToRecreatePods := true
+	postponeReasons := make([]string, 0)
 	switchoverCandidates := make([]spec.NamespacedName, 0)

 	pods, err := c.listPods()

@@ -561,13 +623,22 @@ func (c *Cluster) syncStatefulSet() error {

 	cmp := c.compareStatefulSetWith(desiredSts)
 	if !cmp.rollingUpdate {
+		updatedPodAnnotations := map[string]*string{}
+		for _, anno := range cmp.deletedPodAnnotations {
+			updatedPodAnnotations[anno] = nil
+		}
+		for anno, val := range desiredSts.Spec.Template.Annotations {
+			updatedPodAnnotations[anno] = &val
+		}
+		metadataReq := map[string]map[string]map[string]*string{"metadata": {"annotations": updatedPodAnnotations}}
+		patch, err := json.Marshal(metadataReq)
+		if err != nil {
+			return fmt.Errorf("could not form patch for pod annotations: %v", err)
+		}
+
 		for _, pod := range pods {
-			if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations); changed {
-				patchData, err := metaAnnotationsPatch(desiredSts.Spec.Template.Annotations)
-				if err != nil {
-					return fmt.Errorf("could not form patch for pod %q annotations: %v", pod.Name, err)
-				}
-				_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations, nil); changed {
+				_, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
 				if err != nil {
 					return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err)
 				}
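
Taking `&val` inside the range loop is only safe because Go 1.22 gave every loop iteration its own copy of the loop variable; on older toolchains all map entries would alias a single variable and end up pointing at the last value. A minimal standalone demonstration (illustrative names):

    package main

    import "fmt"

    func main() {
        src := map[string]string{"a": "1", "b": "2"}
        dst := map[string]*string{}
        for k, v := range src {
            dst[k] = &v // per-iteration variable since Go 1.22
        }
        for k, p := range dst {
            fmt.Println(k, *p) // each entry keeps its own value on Go >= 1.22
        }
    }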

@@ -646,12 +717,14 @@ func (c *Cluster) syncStatefulSet() error {
 	c.logger.Debug("syncing Patroni config")
 	if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil {
 		c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err)
+		postponeReasons = append(postponeReasons, "errors during Patroni config sync")
 		isSafeToRecreatePods = false
 	}

 	// restart Postgres where it is still pending
 	if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil {
 		c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err)
+		postponeReasons = append(postponeReasons, "errors while restarting Postgres via Patroni API")
 		isSafeToRecreatePods = false
 	}

@@ -666,7 +739,7 @@ func (c *Cluster) syncStatefulSet() error {
 		}
 		c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated")
 	} else {
-		c.logger.Warningf("postpone pod recreation until next sync because of errors during config sync")
+		c.logger.Warningf("postpone pod recreation until next sync - reason: %s", strings.Join(postponeReasons, `', '`))
 	}
 }

@@ -1049,13 +1122,14 @@ func (c *Cluster) updateSecret(
 	// fetch user map to update later
 	var userMap map[string]spec.PgUser
 	var userKey string
-	if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
+	switch secretUsername {
+	case c.systemUsers[constants.SuperuserKeyName].Name:
 		userKey = constants.SuperuserKeyName
 		userMap = c.systemUsers
-	} else if secretUsername == c.systemUsers[constants.ReplicationUserKeyName].Name {
+	case c.systemUsers[constants.ReplicationUserKeyName].Name:
 		userKey = constants.ReplicationUserKeyName
 		userMap = c.systemUsers
-	} else {
+	default:
 		userKey = secretUsername
 		userMap = c.pgUsers
 	}

@@ -1135,14 +1209,14 @@ func (c *Cluster) updateSecret(
 	}

 	if updateSecret {
-		c.logger.Infof(updateSecretMsg)
+		c.logger.Infof("%s", updateSecretMsg)
 		if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
 			return fmt.Errorf("could not update secret %s: %v", secretName, err)
 		}
 		c.Secrets[secret.UID] = secret
 	}

-	if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations); changed {
+	if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed {
 		patchData, err := metaAnnotationsPatch(generatedSecret.Annotations)
 		if err != nil {
 			return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err)

@@ -1587,19 +1661,38 @@ func (c *Cluster) syncLogicalBackupJob() error {
 		}
 		c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName())
 	}
-	if match, reason := c.compareLogicalBackupJob(job, desiredJob); !match {
+	if cmp := c.compareLogicalBackupJob(job, desiredJob); !cmp.match {
 		c.logger.Infof("logical job %s is not in the desired state and needs to be updated",
 			c.getLogicalBackupJobName(),
 		)
-		if reason != "" {
-			c.logger.Infof("reason: %s", reason)
+		if len(cmp.reasons) != 0 {
+			for _, reason := range cmp.reasons {
+				c.logger.Infof("reason: %s", reason)
+			}
+		}
+		if len(cmp.deletedPodAnnotations) != 0 {
+			templateMetadataReq := map[string]map[string]map[string]map[string]map[string]map[string]map[string]*string{
+				"spec": {"jobTemplate": {"spec": {"template": {"metadata": {"annotations": {}}}}}}}
+			for _, anno := range cmp.deletedPodAnnotations {
+				templateMetadataReq["spec"]["jobTemplate"]["spec"]["template"]["metadata"]["annotations"][anno] = nil
+			}
+			patch, err := json.Marshal(templateMetadataReq)
+			if err != nil {
+				return fmt.Errorf("could not marshal ObjectMeta for logical backup job %q pod template: %v", jobName, err)
+			}
+
+			job, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
+			if err != nil {
+				c.logger.Errorf("failed to remove annotations from the logical backup job %q pod template: %v", jobName, err)
+				return err
+			}
 		}
 		if err = c.patchLogicalBackupJob(desiredJob); err != nil {
 			return fmt.Errorf("could not update logical backup job to match desired state: %v", err)
 		}
 		c.logger.Info("the logical backup job is synced")
 	}
-	if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations); changed {
+	if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations, nil); changed {
 		patchData, err := metaAnnotationsPatch(desiredJob.Annotations)
 		if err != nil {
 			return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err)
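
The seven-level map literal is easiest to sanity-check by printing the patch it marshals. A standalone sketch (the annotation name is hypothetical):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        req := map[string]map[string]map[string]map[string]map[string]map[string]map[string]*string{
            "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"annotations": {"foo": nil}}}}}}}
        b, _ := json.Marshal(req)
        // {"spec":{"jobTemplate":{"spec":{"template":{"metadata":{"annotations":{"foo":null}}}}}}}
        // the null value makes the strategic merge patch drop the annotation
        // from the cron job's pod template
        fmt.Println(string(b))
    }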

@@ -2,15 +2,14 @@ package cluster

 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"net/http"
+	"slices"
 	"testing"
 	"time"

-	"context"
-
-	"golang.org/x/exp/slices"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"

@@ -142,6 +141,181 @@ func TestSyncStatefulSetsAnnotations(t *testing.T) {
 	}
 }

+func TestPodAnnotationsSync(t *testing.T) {
+	clusterName := "acid-test-cluster-2"
+	namespace := "default"
+	podAnnotation := "no-scale-down"
+	podAnnotations := map[string]string{podAnnotation: "true"}
+	customPodAnnotation := "foo"
+	customPodAnnotations := map[string]string{customPodAnnotation: "true"}
+
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	mockClient := mocks.NewMockHTTPClient(ctrl)
+	client, _ := newFakeK8sAnnotationsClient()
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+			EnableConnectionPooler:        boolToPointer(true),
+			EnableLogicalBackup:           true,
+			EnableReplicaConnectionPooler: boolToPointer(true),
+			PodAnnotations:                podAnnotations,
+			NumberOfInstances:             2,
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PatroniAPICheckInterval: time.Duration(1),
+				PatroniAPICheckTimeout:  time.Duration(5),
+				PodManagementPolicy:     "ordered_ready",
+				CustomPodAnnotations:    customPodAnnotations,
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+					NumberOfInstances:                    k8sutil.Int32ToPointer(1),
+				},
+				Resources: config.Resources{
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					MaxInstances:          -1,
+					PodRoleLabel:          "spilo-role",
+					ResourceCheckInterval: time.Duration(3),
+					ResourceCheckTimeout:  time.Duration(10),
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}`
+	response := http.Response{
+		StatusCode: 200,
+		Body:       io.NopCloser(bytes.NewReader([]byte(configJson))),
+	}
+
+	mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes()
+	cluster.patroni = patroni.New(patroniLogger, mockClient)
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	clusterOptions := clusterLabelsOptions(cluster)
+
+	// create a statefulset
+	_, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+	// create a pods
+	podsList := createPods(cluster)
+	for _, pod := range podsList {
+		_, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
+		assert.NoError(t, err)
+	}
+	// create connection pooler
+	_, err = cluster.createConnectionPooler(mockInstallLookupFunction)
+	assert.NoError(t, err)
+
+	// create cron job
+	err = cluster.createLogicalBackupJob()
+	assert.NoError(t, err)
+
+	annotateResources(cluster)
+	err = cluster.Sync(&cluster.Postgresql)
+	assert.NoError(t, err)
+
+	// 1. PodAnnotations set
+	stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, sts := range stsList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, sts.Spec.Template.Annotations, annotation)
+		}
+	}
+
+	for _, role := range []PostgresRole{Master, Replica} {
+		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
+		assert.NoError(t, err)
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, deploy.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("pooler deployment pod template %s should contain annotation %s, found %#v",
+					deploy.Name, annotation, deploy.Spec.Template.Annotations))
+		}
+	}
+
+	podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, pod := range podList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, pod.Annotations, annotation,
+				fmt.Sprintf("pod %s should contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
+		}
+	}
+
+	cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, cronJob := range cronJobList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("logical backup cron job's pod template should contain annotation %s, found %#v",
+					annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
+		}
+	}
+
+	// 2 PodAnnotations removed
+	newSpec := cluster.Postgresql.DeepCopy()
+	newSpec.Spec.PodAnnotations = nil
+	cluster.OpConfig.CustomPodAnnotations = nil
+	err = cluster.Sync(newSpec)
+	assert.NoError(t, err)
+
+	stsList, err = cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, sts := range stsList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, sts.Spec.Template.Annotations, annotation)
+		}
+	}
+
+	for _, role := range []PostgresRole{Master, Replica} {
+		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
+		assert.NoError(t, err)
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, deploy.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("pooler deployment pod template %s should not contain annotation %s, found %#v",
+					deploy.Name, annotation, deploy.Spec.Template.Annotations))
+		}
+	}
+
+	podList, err = cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, pod := range podList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, pod.Annotations, annotation,
+				fmt.Sprintf("pod %s should not contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
+		}
+	}
+
+	cronJobList, err = cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, cronJob := range cronJobList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("logical backup cron job's pod template should not contain annotation %s, found %#v",
+					annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
+		}
+	}
+}
+
 func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
 	testName := "test config comparison"
 	client, _ := newFakeK8sSyncClient()
@@ -58,15 +58,16 @@ type WorkerStatus struct {

 // ClusterStatus describes status of the cluster
 type ClusterStatus struct {
-	Team                string
-	Cluster             string
-	Namespace           string
-	MasterService       *v1.Service
-	ReplicaService      *v1.Service
-	MasterEndpoint      *v1.Endpoints
-	ReplicaEndpoint     *v1.Endpoints
-	StatefulSet         *appsv1.StatefulSet
-	PodDisruptionBudget *policyv1.PodDisruptionBudget
+	Team                          string
+	Cluster                       string
+	Namespace                     string
+	MasterService                 *v1.Service
+	ReplicaService                *v1.Service
+	MasterEndpoint                *v1.Endpoints
+	ReplicaEndpoint               *v1.Endpoints
+	StatefulSet                   *appsv1.StatefulSet
+	PrimaryPodDisruptionBudget    *policyv1.PodDisruptionBudget
+	CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget

 	CurrentProcess Process
 	Worker         uint32
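The hunk above replaces the single `PodDisruptionBudget` status field with two budgets, so any consumer of `ClusterStatus` has to pick the field it actually means. A minimal, self-contained sketch of that migration; the local types and budget names here are illustrative assumptions, not taken from the operator:

```go
package main

import "fmt"

// pdb is a local stand-in; the operator's fields are *policyv1.PodDisruptionBudget.
type pdb struct{ name string }

type clusterStatus struct {
	PrimaryPodDisruptionBudget    *pdb
	CriticalOpPodDisruptionBudget *pdb
}

func main() {
	status := clusterStatus{
		PrimaryPodDisruptionBudget:    &pdb{name: "postgres-acid-test-pdb"},             // assumed name
		CriticalOpPodDisruptionBudget: &pdb{name: "postgres-acid-test-critical-op-pdb"}, // assumed name
	}
	// Code that formerly read status.PodDisruptionBudget now chooses one:
	fmt.Println(status.PrimaryPodDisruptionBudget.name)
	fmt.Println(status.CriticalOpPodDisruptionBudget.name)
}
```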
@@ -257,9 +257,9 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 	if teamID == "" {
 		msg := "no teamId specified"
 		if c.OpConfig.EnableTeamIdClusternamePrefix {
-			return nil, fmt.Errorf(msg)
+			return nil, fmt.Errorf("%s", msg)
 		}
-		c.logger.Warnf(msg)
+		c.logger.Warnf("%s", msg)
 		return nil, nil
 	}
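This and several later hunks replace `fmt.Errorf(msg)` and `Warnf(msg)` with an explicit `"%s"` verb. A short runnable sketch, using only the standard library, of why that matters: newer `go vet` printf checks report non-constant format strings, and a stray `%` in the message would otherwise be parsed as a verb.

```go
package main

import "fmt"

func main() {
	msg := "no teamId specified (100% required)"

	// fmt.Errorf(msg) treats msg as a format string: the "%" above is parsed
	// as a verb, and newer go vet printf checks also flag this pattern as
	// "non-constant format string in call to fmt.Errorf".
	bad := fmt.Errorf(msg)

	// With an explicit "%s" verb the message passes through verbatim,
	// regardless of what characters it contains.
	good := fmt.Errorf("%s", msg)

	fmt.Println(bad)  // no teamId specified (100%!r(MISSING)equired)
	fmt.Println(good) // no teamId specified (100% required)
}
```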
@@ -663,7 +663,7 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
 	return resources, nil
 }

-func isInMainternanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
+func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
 	if len(specMaintenanceWindows) == 0 {
 		return true
 	}
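The renamed helper keeps its contract: an empty window list counts as always being inside a maintenance window. A rough, runnable sketch of that shape with a hypothetical window type (the real `acidv1.MaintenanceWindow` fields differ):

```go
package main

import (
	"fmt"
	"time"
)

// window is a hypothetical stand-in for acidv1.MaintenanceWindow.
type window struct {
	weekday    time.Weekday
	start, end int // minutes since midnight
}

// isInMaintenanceWindow mirrors the helper's contract:
// no windows configured means "always in a maintenance window".
func isInMaintenanceWindow(windows []window, now time.Time) bool {
	if len(windows) == 0 {
		return true
	}
	minutes := now.Hour()*60 + now.Minute()
	for _, w := range windows {
		if now.Weekday() == w.weekday && minutes >= w.start && minutes <= w.end {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isInMaintenanceWindow(nil, time.Now())) // true
	sunday := []window{{weekday: time.Sunday, start: 60, end: 120}} // 01:00-02:00
	// 2025-01-05 is a Sunday, 01:30 falls inside the window -> true
	fmt.Println(isInMaintenanceWindow(sunday, time.Date(2025, 1, 5, 1, 30, 0, 0, time.UTC)))
}
```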
@@ -247,18 +247,18 @@ func createPods(cluster *Cluster) []v1.Pod {
 	for i, role := range []PostgresRole{Master, Replica} {
 		podsList = append(podsList, v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      fmt.Sprintf("%s-%d", clusterName, i),
+				Name:      fmt.Sprintf("%s-%d", cluster.Name, i),
 				Namespace: namespace,
 				Labels: map[string]string{
 					"application":  "spilo",
-					"cluster-name": clusterName,
+					"cluster-name": cluster.Name,
 					"spilo-role":   string(role),
 				},
 			},
 		})
 		podsList = append(podsList, v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      fmt.Sprintf("%s-pooler-%s", clusterName, role),
+				Name:      fmt.Sprintf("%s-pooler-%s", cluster.Name, role),
 				Namespace: namespace,
 				Labels:    cluster.connectionPoolerLabels(role, true).MatchLabels,
 			},
@@ -329,7 +329,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 	if err != nil {
 		return nil, err
 	}
-	_, err = cluster.createPodDisruptionBudget()
+	err = cluster.createPodDisruptionBudgets()
 	if err != nil {
 		return nil, err
 	}
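The create call becomes plural here, matching the two new status fields. A hedged, standalone sketch of the "create several budgets, stop on the first error" pattern; the creator names are assumptions for illustration, not the operator's actual helpers:

```go
package main

import "fmt"

// createAll runs each create function and stops at the first error,
// mirroring the shape a plural createPodDisruptionBudgets likely has.
func createAll(creators map[string]func() error) error {
	for name, create := range creators {
		if err := create(); err != nil {
			return fmt.Errorf("could not create %s: %w", name, err)
		}
	}
	return nil
}

func main() {
	err := createAll(map[string]func() error{
		"primary PDB":     func() error { return nil }, // assumed budget
		"critical-op PDB": func() error { return nil }, // assumed budget
	})
	fmt.Println(err) // <nil>
}
```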
@@ -705,8 +705,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			cluster.Spec.MaintenanceWindows = tt.windows
-			if isInMainternanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
-				t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected)
+			if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
+				t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
 			}
 		})
 	}
@@ -129,7 +129,7 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {

 	if len(errors) > 0 {
 		for _, s := range errors {
-			c.logger.Warningf(s)
+			c.logger.Warningf("%s", s)
 		}
 	}
 	return nil
@@ -225,7 +225,7 @@ func (c *Cluster) syncVolumeClaims() error {
 	}

 	newAnnotations := c.annotationsSet(nil)
-	if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations); changed {
+	if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations, nil); changed {
 		patchData, err := metaAnnotationsPatch(newAnnotations)
 		if err != nil {
 			return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err)
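`compareAnnotations` now takes a third argument, with `nil` preserving the old behavior; presumably it lets callers exclude certain annotations from the comparison. A standalone sketch of that assumed pattern (the name and semantics below are illustrative, not the operator's exact implementation):

```go
package main

import "fmt"

// annotationsChanged reports whether two annotation maps differ, skipping
// any keys present in ignore. Sketch of the assumed semantics only.
func annotationsChanged(current, desired map[string]string, ignore map[string]bool) bool {
	keys := map[string]bool{}
	for k := range current {
		keys[k] = true
	}
	for k := range desired {
		keys[k] = true
	}
	for k := range keys {
		if ignore[k] {
			continue
		}
		cv, inCurrent := current[k]
		dv, inDesired := desired[k]
		if !inCurrent || !inDesired || cv != dv {
			return true
		}
	}
	return false
}

func main() {
	current := map[string]string{"owner": "team-a", "deployment-time": "yesterday"}
	desired := map[string]string{"owner": "team-a", "deployment-time": "today"}
	fmt.Println(annotationsChanged(current, desired, nil))                                      // true
	fmt.Println(annotationsChanged(current, desired, map[string]bool{"deployment-time": true})) // false
}
```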
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
 	result.EtcdHost = fromCRD.EtcdHost
 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
+	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p3")
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
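The image bump rides on `util.Coalesce`, which, judging from its use here, returns the configured value when non-empty and the fallback otherwise. A minimal runnable sketch under that assumption:

```go
package main

import "fmt"

// coalesce returns val if non-empty, otherwise defaultVal --
// the assumed behavior of the operator's util.Coalesce.
func coalesce(val, defaultVal string) string {
	if val != "" {
		return val
	}
	return defaultVal
}

func main() {
	// No image configured in the CRD: the new default wins.
	fmt.Println(coalesce("", "ghcr.io/zalando/spilo-17:4.0-p3"))
	// An explicitly configured image always takes precedence.
	fmt.Println(coalesce("my-registry/spilo:custom", "ghcr.io/zalando/spilo-17:4.0-p3"))
}
```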
@@ -180,7 +180,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
@@ -597,7 +597,7 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
 	_, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{})
 	if k8sutil.ResourceNotFound(err) {

-		c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))
+		c.logger.Infof("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)

 		// get a separate copy of service account
 		// to prevent a race condition when setting a namespace for many clusters
@@ -248,7 +248,7 @@ func (c *Controller) getInfrastructureRoles(
 	}

 	if len(errors) > 0 {
-		return uniqRoles, fmt.Errorf(strings.Join(errors, `', '`))
+		return uniqRoles, fmt.Errorf("%s", strings.Join(errors, `', '`))
 	}

 	return uniqRoles, nil
@@ -1,5 +1,5 @@
 /*
-Copyright 2024 Compose, Zalando SE
+Copyright 2025 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
(The same 2024 → 2025 copyright-year update is repeated verbatim in each of the remaining changed source files.)
Some files were not shown because too many files have changed in this diff.