Merge branch 'master' into master
This commit is contained in:
commit
1fac45b709
|
|
@ -9,7 +9,7 @@ assignees: ''
|
|||
|
||||
Please, answer some short questions which should help us to understand your problem / question better?
|
||||
|
||||
- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.13.0
|
||||
- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.15.1
|
||||
- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
|
||||
- **Are you running Postgres Operator in production?** [yes | no]
|
||||
- **Type of issue?** [Bug report, question, feature request, etc.]
|
||||
|
|
|
|||
|
|
@ -23,10 +23,10 @@ jobs:
|
|||
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "^1.23.4"
|
||||
go-version: "^1.25.3"
|
||||
|
||||
- name: Run unit tests
|
||||
run: make deps mocks test
|
||||
run: make test
|
||||
|
||||
- name: Define image name
|
||||
id: image
|
||||
|
|
|
|||
|
|
@ -14,9 +14,9 @@ jobs:
|
|||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "^1.23.4"
|
||||
go-version: "^1.25.3"
|
||||
- name: Make dependencies
|
||||
run: make deps mocks
|
||||
run: make mocks
|
||||
- name: Code generation
|
||||
run: make codegen
|
||||
- name: Run unit tests
|
||||
|
|
|
|||
|
|
@ -14,9 +14,9 @@ jobs:
|
|||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "^1.23.4"
|
||||
go-version: "^1.25.3"
|
||||
- name: Make dependencies
|
||||
run: make deps mocks
|
||||
run: make mocks
|
||||
- name: Compile
|
||||
run: make linux
|
||||
- name: Run unit tests
|
||||
|
|
|
|||
|
|
@ -1,2 +1,2 @@
|
|||
# global owners
|
||||
* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital
|
||||
* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @mikkeloscar
|
||||
|
|
|
|||
2
LICENSE
2
LICENSE
|
|
@ -1,6 +1,6 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2024 Zalando SE
|
||||
Copyright (c) 2025 Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -4,4 +4,4 @@ Jan Mussler <jan.mussler@zalando.de>
|
|||
Jociele Padilha <jociele.padilha@zalando.de>
|
||||
Ida Novindasari <ida.novindasari@zalando.de>
|
||||
Polina Bungina <polina.bungina@zalando.de>
|
||||
Matthias Adler <matthias.adler@zalando.de>
|
||||
Mikkel Larsen <mikkel.larsen@zalando.de>
|
||||
|
|
|
|||
68
Makefile
68
Makefile
|
|
@ -12,13 +12,17 @@ LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
|
|||
LDFLAGS ?= -X=main.version=$(VERSION)
|
||||
DOCKERDIR = docker
|
||||
|
||||
IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY)
|
||||
BASE_IMAGE ?= alpine:latest
|
||||
IMAGE ?= ghcr.io/zalando/$(BINARY)
|
||||
TAG ?= $(VERSION)
|
||||
GITHEAD = $(shell git rev-parse --short HEAD)
|
||||
GITURL = $(shell git config --get remote.origin.url)
|
||||
GITSTATUS = $(shell git status --porcelain || echo "no changes")
|
||||
SOURCES = cmd/main.go
|
||||
VERSION ?= $(shell git describe --tags --always --dirty)
|
||||
CRD_SOURCES = $(shell find pkg/apis/zalando.org pkg/apis/acid.zalan.do -name '*.go' -not -name '*.deepcopy.go')
|
||||
GENERATED_CRDS = manifests/postgresteam.crd.yaml manifests/postgresql.crd.yaml pkg/apis/acid.zalan.do/v1/postgresql.crd.yaml
|
||||
GENERATED = pkg/apis/zalando.org/v1/zz_generated.deepcopy.go pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
|
||||
DIRS := cmd pkg
|
||||
PKG := `go list ./... | grep -v /vendor/`
|
||||
|
||||
|
|
@ -42,46 +46,61 @@ ifndef GOPATH
|
|||
GOPATH := $(HOME)/go
|
||||
endif
|
||||
|
||||
PATH := $(GOPATH)/bin:$(PATH)
|
||||
SHELL := env PATH=$(PATH) $(SHELL)
|
||||
PATH := $(GOPATH)/bin:$(PATH)
|
||||
SHELL := env PATH="$(PATH)" $(SHELL)
|
||||
IMAGE_TAG := $(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)
|
||||
|
||||
default: local
|
||||
|
||||
clean:
|
||||
rm -rf build
|
||||
rm $(GENERATED)
|
||||
rm $(GENERATED_CRDS)
|
||||
|
||||
local: ${SOURCES}
|
||||
verify:
|
||||
hack/verify-codegen.sh
|
||||
CGO_ENABLED=${CGO_ENABLED} go build -o build/${BINARY} $(LOCAL_BUILD_FLAGS) -ldflags "$(LDFLAGS)" $^
|
||||
|
||||
linux: ${SOURCES}
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/linux/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $^
|
||||
$(GENERATED): go.mod $(CRD_SOURCES)
|
||||
hack/update-codegen.sh
|
||||
|
||||
macos: ${SOURCES}
|
||||
GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/macos/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $^
|
||||
$(GENERATED_CRDS): $(GENERATED)
|
||||
go tool controller-gen crd:crdVersions=v1,allowDangerousTypes=true paths=./pkg/apis/acid.zalan.do/... output:crd:dir=manifests
|
||||
# only generate postgresteam.crd.yaml and postgresql.crd.yaml for now
|
||||
@rm manifests/acid.zalan.do_operatorconfigurations.yaml
|
||||
@mv manifests/acid.zalan.do_postgresqls.yaml manifests/postgresql.crd.yaml
|
||||
@# hack to use lowercase kind and listKind
|
||||
@sed -i -e 's/kind: Postgresql/kind: postgresql/' manifests/postgresql.crd.yaml
|
||||
@sed -i -e 's/listKind: PostgresqlList/listKind: postgresqlList/' manifests/postgresql.crd.yaml
|
||||
@hack/adjust_postgresql_crd.sh
|
||||
@mv manifests/acid.zalan.do_postgresteams.yaml manifests/postgresteam.crd.yaml
|
||||
@cp manifests/postgresql.crd.yaml pkg/apis/acid.zalan.do/v1/postgresql.crd.yaml
|
||||
|
||||
docker: ${DOCKERDIR}/${DOCKERFILE}
|
||||
local: ${SOURCES} $(GENERATED_CRDS)
|
||||
CGO_ENABLED=${CGO_ENABLED} go build -o build/${BINARY} $(LOCAL_BUILD_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCES)
|
||||
|
||||
wasm: ${SOURCES} $(GENERATED_CRDS)
|
||||
GOOS=wasip1 GOARCH=wasm CGO_ENABLED=${CGO_ENABLED} go build -o build/${BINARY}.wasm ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
|
||||
|
||||
linux: ${SOURCES} $(GENERATED_CRDS)
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/linux/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
|
||||
|
||||
macos: ${SOURCES} $(GENERATED_CRDS)
|
||||
GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/macos/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
|
||||
|
||||
docker: $(GENERATED_CRDS) ${DOCKERDIR}/${DOCKERFILE}
|
||||
echo `(env)`
|
||||
echo "Tag ${TAG}"
|
||||
echo "Version ${VERSION}"
|
||||
echo "CDP tag ${CDP_TAG}"
|
||||
echo "git describe $(shell git describe --tags --always --dirty)"
|
||||
docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .
|
||||
docker build --rm -t "$(IMAGE_TAG)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" --build-arg BASE_IMAGE="${BASE_IMAGE}" .
|
||||
|
||||
indocker-race:
|
||||
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux"
|
||||
|
||||
push:
|
||||
docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
|
||||
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.25.3 bash -c "make linux"
|
||||
|
||||
mocks:
|
||||
GO111MODULE=on go generate ./...
|
||||
|
||||
tools:
|
||||
GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4
|
||||
GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
|
||||
GO111MODULE=on go mod tidy
|
||||
|
||||
fmt:
|
||||
@gofmt -l -w -s $(DIRS)
|
||||
|
||||
|
|
@ -89,15 +108,10 @@ vet:
|
|||
@go vet $(PKG)
|
||||
@staticcheck $(PKG)
|
||||
|
||||
deps: tools
|
||||
GO111MODULE=on go mod vendor
|
||||
|
||||
test:
|
||||
hack/verify-codegen.sh
|
||||
test: mocks $(GENERATED) $(GENERATED_CRDS)
|
||||
GO111MODULE=on go test ./...
|
||||
|
||||
codegen:
|
||||
hack/update-codegen.sh
|
||||
codegen: $(GENERATED)
|
||||
|
||||
e2e: docker # build operator image to be tested
|
||||
cd e2e; make e2etest
|
||||
|
|
|
|||
16
README.md
16
README.md
|
|
@ -17,6 +17,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
|
|||
* Live volume resize without pod restarts (AWS EBS, PVC)
|
||||
* Database connection pooling with PGBouncer
|
||||
* Support fast in place major version upgrade. Supports global upgrade of all clusters.
|
||||
* Pod protection during bootstrap phase and configurable maintenance windows
|
||||
* Restore and cloning Postgres clusters on AWS, GCS and Azure
|
||||
* Additionally logical backups to S3 or GCS bucket can be configured
|
||||
* Standby cluster from S3 or GCS WAL archive
|
||||
|
|
@ -28,25 +29,30 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
|
|||
|
||||
### PostgreSQL features
|
||||
|
||||
* Supports PostgreSQL 17, starting from 13+
|
||||
* Supports PostgreSQL 18, starting from 14+
|
||||
* Streaming replication cluster via Patroni
|
||||
* Point-In-Time-Recovery with
|
||||
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) /
|
||||
[WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
|
||||
[pg_basebackup](https://www.postgresql.org/docs/18/app-pgbasebackup.html) /
|
||||
[WAL-G](https://github.com/wal-g/wal-g) or [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
|
||||
* Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
|
||||
[pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html),
|
||||
[pg_stat_statements](https://www.postgresql.org/docs/18/pgstatstatements.html),
|
||||
[pgextwlist](https://github.com/dimitri/pgextwlist),
|
||||
[pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
|
||||
* Incl. popular Postgres extensions such as
|
||||
[decoderbufs](https://github.com/debezium/postgres-decoderbufs),
|
||||
[hypopg](https://github.com/HypoPG/hypopg),
|
||||
[pg_cron](https://github.com/citusdata/pg_cron),
|
||||
[pg_repack](https://github.com/reorg/pg_repack),
|
||||
[pg_partman](https://github.com/pgpartman/pg_partman),
|
||||
[pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache),
|
||||
[pg_audit](https://github.com/pgaudit/pgaudit),
|
||||
[pgfaceting](https://github.com/cybertec-postgresql/pgfaceting),
|
||||
[pgq](https://github.com/pgq/pgq),
|
||||
[pgvector](https://github.com/pgvector/pgvector),
|
||||
[plpgsql_check](https://github.com/okbob/plpgsql_check),
|
||||
[plproxy](https://github.com/plproxy/plproxy),
|
||||
[postgis](https://postgis.net/),
|
||||
[roaringbitmap](https://github.com/ChenHuajun/pg_roaringbitmap),
|
||||
[set_user](https://github.com/pgaudit/set_user) and
|
||||
[timescaledb](https://github.com/timescale/timescaledb)
|
||||
|
||||
|
|
@ -57,12 +63,12 @@ production for over five years.
|
|||
|
||||
| Release | Postgres versions | K8s versions | Golang |
|
||||
| :-------- | :---------------: | :---------------: | :-----: |
|
||||
| v1.15.1 | 13 → 17 | 1.27+ | 1.25.3 |
|
||||
| v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 |
|
||||
| v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 |
|
||||
| v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 |
|
||||
| v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 |
|
||||
| v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 |
|
||||
| v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 |
|
||||
|
||||
## Getting started
|
||||
|
||||
|
|
|
|||
|
|
@ -9,4 +9,4 @@ mkdir -p "$team_repo"
|
|||
ln -s "$PWD" "$project_dir"
|
||||
cd "$project_dir"
|
||||
|
||||
make deps clean docker push
|
||||
make clean docker push
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
apiVersion: v2
|
||||
name: postgres-operator-ui
|
||||
version: 1.14.0
|
||||
appVersion: 1.14.0
|
||||
version: 1.15.1
|
||||
appVersion: 1.15.1
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
|
||||
keywords:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,32 @@
|
|||
apiVersion: v1
|
||||
entries:
|
||||
postgres-operator-ui:
|
||||
- apiVersion: v2
|
||||
appVersion: 1.15.1
|
||||
created: "2025-12-11T12:44:25.470723322+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: 4bbb750934366038d692711f924151182b7be131b6822d011f5a4e51cf609482
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- ui
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator-ui
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-ui-1.15.1.tgz
|
||||
version: 1.15.1
|
||||
- apiVersion: v2
|
||||
appVersion: 1.14.0
|
||||
created: "2024-12-23T11:26:07.721761867+01:00"
|
||||
created: "2025-12-11T12:44:25.468680645+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c
|
||||
|
|
@ -26,7 +49,7 @@ entries:
|
|||
version: 1.14.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.13.0
|
||||
created: "2024-12-23T11:26:07.719409282+01:00"
|
||||
created: "2025-12-11T12:44:25.466716836+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8
|
||||
|
|
@ -49,7 +72,7 @@ entries:
|
|||
version: 1.13.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.12.2
|
||||
created: "2024-12-23T11:26:07.717202918+01:00"
|
||||
created: "2025-12-11T12:44:25.464739895+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd
|
||||
|
|
@ -72,7 +95,7 @@ entries:
|
|||
version: 1.12.2
|
||||
- apiVersion: v2
|
||||
appVersion: 1.11.0
|
||||
created: "2024-12-23T11:26:07.714792146+01:00"
|
||||
created: "2025-12-11T12:44:25.462698399+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2
|
||||
|
|
@ -95,7 +118,7 @@ entries:
|
|||
version: 1.11.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.10.1
|
||||
created: "2024-12-23T11:26:07.712194397+01:00"
|
||||
created: "2025-12-11T12:44:25.460357063+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce
|
||||
|
|
@ -116,27 +139,4 @@ entries:
|
|||
urls:
|
||||
- postgres-operator-ui-1.10.1.tgz
|
||||
version: 1.10.1
|
||||
- apiVersion: v2
|
||||
appVersion: 1.9.0
|
||||
created: "2024-12-23T11:26:07.723891496+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- ui
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator-ui
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-ui-1.9.0.tgz
|
||||
version: 1.9.0
|
||||
generated: "2024-12-23T11:26:07.709192608+01:00"
|
||||
generated: "2025-12-11T12:44:25.45732896+01:00"
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
|
|
@ -84,11 +84,11 @@ spec:
|
|||
"limit_iops": 16000,
|
||||
"limit_throughput": 1000,
|
||||
"postgresql_versions": [
|
||||
"18",
|
||||
"17",
|
||||
"16",
|
||||
"15",
|
||||
"14",
|
||||
"13"
|
||||
]
|
||||
}
|
||||
{{- if .Values.extraEnvs }}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ replicaCount: 1
|
|||
image:
|
||||
registry: ghcr.io
|
||||
repository: zalando/postgres-operator-ui
|
||||
tag: v1.14.0
|
||||
tag: v1.15.1
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Optionally specify an array of imagePullSecrets.
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
apiVersion: v2
|
||||
name: postgres-operator
|
||||
version: 1.14.0
|
||||
appVersion: 1.14.0
|
||||
version: 1.15.1
|
||||
appVersion: 1.15.1
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
|
||||
keywords:
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ spec:
|
|||
type: string
|
||||
docker_image:
|
||||
type: string
|
||||
default: "ghcr.io/zalando/spilo-17:4.0-p2"
|
||||
default: "ghcr.io/zalando/spilo-18:4.1-p1"
|
||||
enable_crd_registration:
|
||||
type: boolean
|
||||
default: true
|
||||
|
|
@ -79,6 +79,9 @@ spec:
|
|||
enable_lazy_spilo_upgrade:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_maintenance_windows:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_pgversion_env_var:
|
||||
type: boolean
|
||||
default: true
|
||||
|
|
@ -96,9 +99,16 @@ spec:
|
|||
default: ""
|
||||
ignore_instance_limits_annotation_key:
|
||||
type: string
|
||||
ignore_resources_limits_annotation_key:
|
||||
type: string
|
||||
kubernetes_use_configmaps:
|
||||
type: boolean
|
||||
default: false
|
||||
maintenance_windows:
|
||||
items:
|
||||
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
|
||||
type: string
|
||||
type: array
|
||||
max_instances:
|
||||
type: integer
|
||||
description: "-1 = disabled"
|
||||
|
|
@ -167,10 +177,10 @@ spec:
|
|||
type: string
|
||||
minimal_major_version:
|
||||
type: string
|
||||
default: "13"
|
||||
default: "14"
|
||||
target_major_version:
|
||||
type: string
|
||||
default: "17"
|
||||
default: "18"
|
||||
kubernetes:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -510,7 +520,7 @@ spec:
|
|||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
logical_backup_docker_image:
|
||||
type: string
|
||||
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
|
||||
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
logical_backup_google_application_credentials:
|
||||
type: string
|
||||
logical_backup_job_prefix:
|
||||
|
|
|
|||
|
|
@ -278,7 +278,6 @@ spec:
|
|||
items:
|
||||
type: string
|
||||
weight:
|
||||
format: int32
|
||||
type: integer
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
type: object
|
||||
|
|
@ -375,11 +374,11 @@ spec:
|
|||
version:
|
||||
type: string
|
||||
enum:
|
||||
- "13"
|
||||
- "14"
|
||||
- "15"
|
||||
- "16"
|
||||
- "17"
|
||||
- "18"
|
||||
parameters:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -494,13 +493,19 @@ spec:
|
|||
type: string
|
||||
standby_port:
|
||||
type: string
|
||||
oneOf:
|
||||
standby_primary_slot_name:
|
||||
type: string
|
||||
anyOf:
|
||||
- required:
|
||||
- s3_wal_path
|
||||
- required:
|
||||
- gs_wal_path
|
||||
- required:
|
||||
- standby_host
|
||||
not:
|
||||
required:
|
||||
- s3_wal_path
|
||||
- gs_wal_path
|
||||
streams:
|
||||
type: array
|
||||
items:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,53 @@
|
|||
apiVersion: v1
|
||||
entries:
|
||||
postgres-operator:
|
||||
- apiVersion: v2
|
||||
appVersion: 1.15.1
|
||||
created: "2025-12-17T14:48:33.832345061+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 9f3edc3d796105c02c04eaae28a78e58fb08c1847a9de012245fd6ac2c0d2c00
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-1.15.1.tgz
|
||||
version: 1.15.1
|
||||
- apiVersion: v2
|
||||
appVersion: 1.15.0
|
||||
created: "2025-12-17T14:48:33.826117296+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 002dd47647bf51fbba023bd1762d807be478cf37de7a44b80cd01ac1f20bd94a
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-1.15.0.tgz
|
||||
version: 1.15.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.14.0
|
||||
created: "2024-12-23T11:25:32.596716566+01:00"
|
||||
created: "2025-12-17T14:48:33.819729144+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8
|
||||
|
|
@ -25,7 +69,7 @@ entries:
|
|||
version: 1.14.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.13.0
|
||||
created: "2024-12-23T11:25:32.591136261+01:00"
|
||||
created: "2025-12-17T14:48:33.81038602+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef
|
||||
|
|
@ -47,7 +91,7 @@ entries:
|
|||
version: 1.13.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.12.2
|
||||
created: "2024-12-23T11:25:32.585419709+01:00"
|
||||
created: "2025-12-17T14:48:33.803256825+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8
|
||||
|
|
@ -69,7 +113,7 @@ entries:
|
|||
version: 1.12.2
|
||||
- apiVersion: v2
|
||||
appVersion: 1.11.0
|
||||
created: "2024-12-23T11:25:32.580077286+01:00"
|
||||
created: "2025-12-17T14:48:33.797369053+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9
|
||||
|
|
@ -91,7 +135,7 @@ entries:
|
|||
version: 1.11.0
|
||||
- apiVersion: v2
|
||||
appVersion: 1.10.1
|
||||
created: "2024-12-23T11:25:32.574641578+01:00"
|
||||
created: "2025-12-17T14:48:33.791368349+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c
|
||||
|
|
@ -111,26 +155,4 @@ entries:
|
|||
urls:
|
||||
- postgres-operator-1.10.1.tgz
|
||||
version: 1.10.1
|
||||
- apiVersion: v2
|
||||
appVersion: 1.9.0
|
||||
created: "2024-12-23T11:25:32.604748814+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-1.9.0.tgz
|
||||
version: 1.9.0
|
||||
generated: "2024-12-23T11:25:32.568598763+01:00"
|
||||
generated: "2025-12-17T14:48:33.785159183+01:00"
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -1,7 +1,7 @@
|
|||
image:
|
||||
registry: ghcr.io
|
||||
repository: zalando/postgres-operator
|
||||
tag: v1.14.0
|
||||
tag: v1.15.1
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Optionally specify an array of imagePullSecrets.
|
||||
|
|
@ -27,25 +27,32 @@ configGeneral:
|
|||
- "all"
|
||||
# update only the statefulsets without immediately doing the rolling update
|
||||
enable_lazy_spilo_upgrade: false
|
||||
# toogle to use maintenance windows feature
|
||||
enable_maintenance_windows: true
|
||||
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
|
||||
enable_pgversion_env_var: true
|
||||
# start any new database pod without limitations on shm memory
|
||||
enable_shm_volume: true
|
||||
# enables backwards compatible path between Spilo 12 and Spilo 13+ images
|
||||
enable_spilo_wal_path_compat: false
|
||||
# operator will sync only clusters where name starts with teamId prefix
|
||||
enable_team_id_clustername_prefix: false
|
||||
# etcd connection string for Patroni. Empty uses K8s-native DCS.
|
||||
etcd_host: ""
|
||||
# Spilo docker image
|
||||
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
|
||||
docker_image: ghcr.io/zalando/spilo-18:4.1-p1
|
||||
|
||||
# key name for annotation to ignore globally configured instance limits
|
||||
# ignore_instance_limits_annotation_key: ""
|
||||
|
||||
# key name for annotation to ignore globally configured resources thresholds
|
||||
# ignore_resources_limits_annotation_key: ""
|
||||
|
||||
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
|
||||
# kubernetes_use_configmaps: false
|
||||
|
||||
# maintenance windows applied to all Postgres clusters unless overridden in the manifest
|
||||
# maintenance_windows:
|
||||
# - "Sun:01:00-06:00"
|
||||
|
||||
# min number of instances in Postgres cluster. -1 = no limit
|
||||
min_instances: -1
|
||||
# max number of instances in Postgres cluster. -1 = no limit
|
||||
|
|
@ -89,9 +96,9 @@ configMajorVersionUpgrade:
|
|||
# - acid
|
||||
|
||||
# minimal Postgres major version that will not automatically be upgraded
|
||||
minimal_major_version: "13"
|
||||
minimal_major_version: "14"
|
||||
# target Postgres major version when upgrading clusters automatically
|
||||
target_major_version: "17"
|
||||
target_major_version: "18"
|
||||
|
||||
configKubernetes:
|
||||
# list of additional capabilities for postgres container
|
||||
|
|
@ -364,7 +371,7 @@ configLogicalBackup:
|
|||
# logical_backup_memory_request: ""
|
||||
|
||||
# image for pods of the logical backup job (example runs pg_dumpall)
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
# path of google cloud service account json file
|
||||
# logical_backup_google_application_credentials: ""
|
||||
|
||||
|
|
|
|||
161
delivery.yaml
161
delivery.yaml
|
|
@ -1,72 +1,107 @@
|
|||
version: "2017-09-20"
|
||||
allow_concurrent_steps: true
|
||||
|
||||
build_env: &BUILD_ENV
|
||||
PYTHON_BASE_IMAGE: container-registry.zalando.net/library/python-3.11-slim
|
||||
ALPINE_BASE_IMAGE: container-registry.zalando.net/library/alpine-3
|
||||
MULTI_ARCH_REGISTRY: container-registry-test.zalando.net/acid
|
||||
|
||||
pipeline:
|
||||
- id: build-postgres-operator
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
size: large
|
||||
image: cdp-runtime/go
|
||||
cache:
|
||||
paths:
|
||||
- /go/pkg/mod # pkg cache for Go modules
|
||||
- ~/.cache/go-build # Go build cache
|
||||
commands:
|
||||
- desc: Run unit tests
|
||||
cmd: |
|
||||
make deps mocks test
|
||||
- id: build-postgres-operator
|
||||
env:
|
||||
<<: *BUILD_ENV
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
size: large
|
||||
image: cdp-runtime/go
|
||||
cache:
|
||||
paths:
|
||||
- /go/pkg/mod # pkg cache for Go modules
|
||||
- ~/.cache/go-build # Go build cache
|
||||
commands:
|
||||
- desc: Run unit tests
|
||||
cmd: |
|
||||
make mocks test
|
||||
if ! git diff --quiet; then
|
||||
echo "Build resulted in files being changed, likely they were not checked in"
|
||||
exit 1
|
||||
fi
|
||||
- desc: Build Docker image
|
||||
cmd: |
|
||||
if [ -z ${CDP_SOURCE_BRANCH} ]; then
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator
|
||||
else
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-test
|
||||
fi
|
||||
|
||||
- desc: Build Docker image
|
||||
cmd: |
|
||||
IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
|
||||
if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
|
||||
then
|
||||
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator
|
||||
else
|
||||
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test
|
||||
fi
|
||||
export IMAGE
|
||||
make docker push
|
||||
docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
|
||||
docker buildx build --platform "linux/amd64,linux/arm64" \
|
||||
--build-arg BASE_IMAGE="${ALPINE_BASE_IMAGE}" \
|
||||
-t "${IMAGE}:${CDP_BUILD_VERSION}" \
|
||||
-f docker/Dockerfile \
|
||||
--push .
|
||||
|
||||
- id: build-operator-ui
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
- id: build-operator-ui
|
||||
env:
|
||||
<<: *BUILD_ENV
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
|
||||
commands:
|
||||
- desc: 'Prepare environment'
|
||||
cmd: |
|
||||
apt-get update
|
||||
apt-get install -y build-essential
|
||||
commands:
|
||||
- desc: 'Prepare environment'
|
||||
cmd: |
|
||||
apt-get update
|
||||
apt-get install -y build-essential
|
||||
|
||||
- desc: 'Compile JavaScript app'
|
||||
cmd: |
|
||||
cd ui
|
||||
make appjs
|
||||
- desc: 'Compile JavaScript app'
|
||||
cmd: |
|
||||
cd ui
|
||||
make appjs
|
||||
|
||||
- desc: 'Build and push Docker image'
|
||||
cmd: |
|
||||
cd ui
|
||||
IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
|
||||
if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
|
||||
then
|
||||
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui
|
||||
else
|
||||
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test
|
||||
fi
|
||||
export IMAGE
|
||||
make docker
|
||||
make push
|
||||
- desc: 'Build and push Docker image'
|
||||
cmd: |
|
||||
cd ui
|
||||
if [ -z ${CDP_SOURCE_BRANCH} ]; then
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-ui
|
||||
else
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-ui-test
|
||||
fi
|
||||
|
||||
- id: build-logical-backup
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
make appjs
|
||||
docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
|
||||
docker buildx build --platform linux/amd64,linux/arm64 \
|
||||
--build-arg BASE_IMAGE="${PYTHON_BASE_IMAGE}" \
|
||||
-t ${IMAGE}:${CDP_BUILD_VERSION} \
|
||||
--push .
|
||||
|
||||
commands:
|
||||
- desc: Build image
|
||||
cmd: |
|
||||
cd logical-backup
|
||||
export TAG=$(git describe --tags --always --dirty)
|
||||
IMAGE="registry-write.opensource.zalan.do/acid/logical-backup"
|
||||
docker build --rm -t "$IMAGE:$TAG$CDP_TAG" .
|
||||
docker push "$IMAGE:$TAG$CDP_TAG"
|
||||
if [ -z ${CDP_SOURCE_BRANCH} ]; then
|
||||
cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION}
|
||||
fi
|
||||
|
||||
- id: build-logical-backup
|
||||
env:
|
||||
<<: *BUILD_ENV
|
||||
type: script
|
||||
vm_config:
|
||||
type: linux
|
||||
|
||||
commands:
|
||||
- desc: Build image
|
||||
cmd: |
|
||||
cd logical-backup
|
||||
if [ -z ${CDP_SOURCE_BRANCH} ]; then
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/logical-backup
|
||||
else
|
||||
IMAGE=${MULTI_ARCH_REGISTRY}/logical-backup-test
|
||||
fi
|
||||
|
||||
docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
|
||||
docker buildx build --platform linux/amd64,linux/arm64 \
|
||||
-t ${IMAGE}:${CDP_BUILD_VERSION} \
|
||||
--push .
|
||||
|
||||
if [ -z ${CDP_SOURCE_BRANCH} ]; then
|
||||
cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION}
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.23-alpine
|
||||
FROM golang:1.25-alpine
|
||||
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
|
||||
|
||||
# We need root certificates to deal with teams api over https
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
|
||||
FROM golang:1.23-alpine AS builder
|
||||
ARG BASE_IMAGE=alpine:latest
|
||||
FROM golang:1.25-alpine AS builder
|
||||
ARG VERSION=latest
|
||||
|
||||
COPY . /go/src/github.com/zalando/postgres-operator
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ apt-get install -y wget
|
|||
|
||||
(
|
||||
cd /tmp
|
||||
wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz
|
||||
wget -q "https://storage.googleapis.com/golang/go1.25.3.linux-${arch}.tar.gz" -O go.tar.gz
|
||||
tar -xf go.tar.gz
|
||||
mv go /usr/local
|
||||
ln -s /usr/local/go/bin/go /usr/bin/go
|
||||
|
|
|
|||
|
|
@ -65,7 +65,10 @@ the `PGVERSION` environment variable is set for the database pods. Since
|
|||
In-place major version upgrades can be configured to be executed by the
|
||||
operator with the `major_version_upgrade_mode` option. By default, it is
|
||||
enabled (mode: `manual`). In any case, altering the version in the manifest
|
||||
will trigger a rolling update of pods to update the `PGVERSION` env variable.
|
||||
will update the desired `PGVERSION`. If `maintenanceWindows` are configured,
|
||||
major-version-related pod rotation is deferred until the next maintenance
|
||||
window. Without maintenance windows, the operator will trigger a rolling
|
||||
update of pods to apply the new `PGVERSION`.
|
||||
Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
|
||||
script will notice the version mismatch but start the current version again.
|
||||
|
||||
|
|
@ -92,10 +95,11 @@ Thus, the `full` mode can create drift between desired and actual state.
|
|||
|
||||
### Upgrade during maintenance windows
|
||||
|
||||
When `maintenanceWindows` are defined in the Postgres manifest the operator
|
||||
will trigger a major version upgrade only during these periods. Make sure they
|
||||
are at least twice as long as your configured `resync_period` to guarantee
|
||||
that operator actions can be triggered.
|
||||
When `maintenanceWindows` are defined in the Postgres manifest or in the global
|
||||
config the operator will trigger major-version-related pod rotation and the
|
||||
major version upgrade only during these periods. Make sure they are at least
|
||||
twice as long as your configured `resync_period` to guarantee that operator
|
||||
actions can be triggered.
|
||||
|
||||
### Upgrade annotations
|
||||
|
||||
|
|
@ -195,12 +199,14 @@ from numerous escape characters in the latter log entry, view it in CLI with
|
|||
used internally in K8s.
|
||||
|
||||
The StatefulSet is replaced if the following properties change:
|
||||
|
||||
- annotations
|
||||
- volumeClaimTemplates
|
||||
- template volumes
|
||||
|
||||
The StatefulSet is replaced and a rolling updates is triggered if the following
|
||||
properties differ between the old and new state:
|
||||
|
||||
- container name, ports, image, resources, env, envFrom, securityContext and volumeMounts
|
||||
- template labels, annotations, service account, securityContext, affinity, priority class and termination grace period
|
||||
|
||||
|
|
@ -885,19 +891,18 @@ cluster manifest. In the case any of these variables are omitted from the
|
|||
manifest, the operator configuration settings `enable_master_load_balancer` and
|
||||
`enable_replica_load_balancer` apply. Note that the operator settings affect
|
||||
all Postgresql services running in all namespaces watched by the operator.
|
||||
If load balancing is enabled two default annotations will be applied to its
|
||||
services:
|
||||
If load balancing is enabled the following default annotation will be applied to
|
||||
its services:
|
||||
|
||||
- `external-dns.alpha.kubernetes.io/hostname` with the value defined by the
|
||||
operator configs `master_dns_name_format` and `replica_dns_name_format`.
|
||||
This value can't be overwritten. If any change to its value is needed, it
|
||||
MUST be done changing the DNS format operator config parameters; and
|
||||
- `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with
|
||||
a default value of "3600".
|
||||
|
||||
There are multiple options to specify service annotations that will be merged
|
||||
with each other and override in the following order (where latter take
|
||||
precedence):
|
||||
|
||||
1. Default annotations if LoadBalancer is enabled
|
||||
2. Globally configured `custom_service_annotations`
|
||||
3. `serviceAnnotations` specified in the cluster manifest
|
||||
|
|
@ -1309,7 +1314,7 @@ aws_or_gcp:
|
|||
|
||||
If cluster members have to be (re)initialized restoring physical backups
|
||||
happens automatically either from the backup location or by running
|
||||
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html)
|
||||
[pg_basebackup](https://www.postgresql.org/docs/18/app-pgbasebackup.html)
|
||||
on one of the other running instances (preferably replicas if they do not lag
|
||||
behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
|
||||
clusters.
|
||||
|
|
@ -1343,10 +1348,12 @@ If you are using [additional environment variables](#custom-pod-environment-vari
|
|||
to access your backup location you have to copy those variables and prepend
|
||||
the `STANDBY_` prefix for Spilo to find the backups and WAL files to stream.
|
||||
|
||||
Alternatively, standby clusters can also stream from a remote primary cluster.
|
||||
Standby clusters can also stream from a remote primary cluster.
|
||||
You have to specify the host address. Port is optional and defaults to 5432.
|
||||
Note, that only one of the options (`s3_wal_path`, `gs_wal_path`,
|
||||
`standby_host`) can be present under the `standby` top-level key.
|
||||
You can combine `standby_host` with either `s3_wal_path` or `gs_wal_path`
|
||||
for additional redundancy. Note that `s3_wal_path` and `gs_wal_path` are
|
||||
mutually exclusive. At least one of `s3_wal_path`, `gs_wal_path`, or
|
||||
`standby_host` must be specified under the `standby` top-level key.
|
||||
|
||||
## Logical backups
|
||||
|
||||
|
|
@ -1495,7 +1502,7 @@ make docker
|
|||
|
||||
# build in image in minikube docker env
|
||||
eval $(minikube docker-env)
|
||||
docker build -t ghcr.io/zalando/postgres-operator-ui:v1.13.0 .
|
||||
docker build -t ghcr.io/zalando/postgres-operator-ui:v1.15.1 .
|
||||
|
||||
# apply UI manifests next to a running Postgres Operator
|
||||
kubectl apply -f manifests/
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ under the ~/go/src sub directories.
|
|||
|
||||
Given the schema above, the Postgres Operator source code located at
|
||||
`github.com/zalando/postgres-operator` should be put at
|
||||
-`~/go/src/github.com/zalando/postgres-operator`.
|
||||
`~/go/src/github.com/zalando/postgres-operator`.
|
||||
|
||||
```bash
|
||||
export GOPATH=~/go
|
||||
|
|
@ -33,17 +33,14 @@ by setting the `GO111MODULE` environment variable to `on`. The make targets do
|
|||
this for you, so simply run
|
||||
|
||||
```bash
|
||||
make deps
|
||||
make
|
||||
```
|
||||
|
||||
This would take a while to complete. You have to redo `make deps` every time
|
||||
your dependencies list changes, i.e. after adding a new library dependency.
|
||||
|
||||
Build the operator with the `make docker` command. You may define the TAG
|
||||
variable to assign an explicit tag to your Docker image and the IMAGE to set
|
||||
the image name. By default, the tag is computed with
|
||||
`git describe --tags --always --dirty` and the image is
|
||||
`registry.opensource.zalan.do/acid/postgres-operator`
|
||||
`ghcr.io/zalando/postgres-operator`
|
||||
|
||||
```bash
|
||||
export TAG=$(git describe --tags --always --dirty)
|
||||
|
|
@ -72,7 +69,7 @@ make docker
|
|||
|
||||
# kind
|
||||
make docker
|
||||
kind load docker-image registry.opensource.zalan.do/acid/postgres-operator:${TAG} --name <kind-cluster-name>
|
||||
kind load docker-image ghcr.io/zalando/postgres-operator:${TAG} --name <kind-cluster-name>
|
||||
```
|
||||
|
||||
Then create a new Postgres Operator deployment.
|
||||
|
|
@ -105,6 +102,7 @@ and K8s-like APIs for its custom resource definitions, namely the
|
|||
Postgres CRD and the operator CRD. The usage of the code generation follows
|
||||
conventions from the K8s community. Relevant scripts live in the `hack`
|
||||
directory:
|
||||
|
||||
* `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`,
|
||||
* `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI).
|
||||
|
||||
|
|
@ -112,6 +110,7 @@ The `/pkg/generated/` contains the resultant code. To make these scripts work,
|
|||
you may need to `export GOPATH=$(go env GOPATH)`
|
||||
|
||||
References for code generation are:
|
||||
|
||||
* [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369)
|
||||
See comments there for minor issues that can sometimes break the generation process.
|
||||
* [Code generator source code](https://github.com/kubernetes/code-generator)
|
||||
|
|
@ -221,14 +220,13 @@ dlv connect 127.0.0.1:DLV_PORT
|
|||
Prerequisites:
|
||||
|
||||
```bash
|
||||
make deps
|
||||
make mocks
|
||||
```
|
||||
|
||||
To run all unit tests, you can simply do:
|
||||
|
||||
```bash
|
||||
go test ./pkg/...
|
||||
make test
|
||||
```
|
||||
|
||||
In case if you need to debug your unit test, it's possible to use delve:
|
||||
|
|
@ -274,10 +272,10 @@ Examples for fake K8s objects can be found in:
|
|||
|
||||
The operator provides reference end-to-end (e2e) tests to
|
||||
ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`.
|
||||
The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build.
|
||||
The special `ghcr.io/zalando/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build.
|
||||
|
||||
Each e2e execution tests a Postgres Operator image built from the current git branch. The test
|
||||
runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/),
|
||||
Each e2e execution tests a Postgres Operator image built from the current git branch.
|
||||
The test runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/),
|
||||
utilizes provided manifest examples, and runs e2e tests contained in the `tests`
|
||||
folder. The K8s API client in the container connects to the `kind` cluster via
|
||||
the standard Docker `bridge` network. The kind cluster is deleted if tests
|
||||
|
|
@ -315,6 +313,7 @@ precedence.
|
|||
|
||||
Update the following Go files that obtain the configuration parameter from the
|
||||
manifest files:
|
||||
|
||||
* [operator_configuration_type.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go)
|
||||
* [operator_config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/controller/operator_config.go)
|
||||
* [config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go)
|
||||
|
|
@ -323,6 +322,7 @@ Postgres manifest parameters are defined in the [api package](https://github.com
|
|||
The operator behavior has to be implemented at least in [k8sres.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres.go).
|
||||
Validation of CRD parameters is controlled in [crds.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/crds.go).
|
||||
Please, reflect your changes in tests, for example in:
|
||||
|
||||
* [config_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config_test.go)
|
||||
* [k8sres_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres_test.go)
|
||||
* [util_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/util_test.go)
|
||||
|
|
@ -330,6 +330,7 @@ Please, reflect your changes in tests, for example in:
|
|||
### Updating manifest files
|
||||
|
||||
For the CRD-based configuration, please update the following files:
|
||||
|
||||
* the default [OperatorConfiguration](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
|
||||
* the CRD's [validation](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml)
|
||||
* the CRD's validation in the [Helm chart](https://github.com/zalando/postgres-operator/blob/master/charts/postgres-operator/crds/operatorconfigurations.yaml)
|
||||
|
|
@ -342,6 +343,7 @@ Last but no least, update the [ConfigMap](https://github.com/zalando/postgres-op
|
|||
|
||||
Finally, add a section for each new configuration option and/or cluster manifest
|
||||
parameter in the reference documents:
|
||||
|
||||
* [config reference](reference/operator_parameters.md)
|
||||
* [manifest reference](reference/cluster_manifest.md)
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ hence set it up first. For local tests we recommend to use one of the following
|
|||
solutions:
|
||||
|
||||
* [minikube](https://github.com/kubernetes/minikube/releases), which creates a
|
||||
single-node K8s cluster inside a VM (requires KVM or VirtualBox),
|
||||
K8s cluster inside a container or VM (requires Docker, KVM, Hyper-V, HyperKit, VirtualBox, or similar),
|
||||
* [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allow creating multi-node K8s
|
||||
clusters running on Docker (requires Docker)
|
||||
|
||||
|
|
@ -20,7 +20,7 @@ This quickstart assumes that you have started minikube or created a local kind
|
|||
cluster. Note that you can also use built-in K8s support in the Docker Desktop
|
||||
for Mac to follow the steps of this tutorial. You would have to replace
|
||||
`minikube start` and `minikube delete` with your launch actions for the Docker
|
||||
built-in K8s support.
|
||||
Desktop built-in K8s support.
|
||||
|
||||
## Configuration Options
|
||||
|
||||
|
|
|
|||
|
|
@ -116,9 +116,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
|
|||
|
||||
* **maintenanceWindows**
|
||||
a list which defines specific time frames when certain maintenance operations
|
||||
such as automatic major upgrades or master pod migration. Accepted formats
|
||||
are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific
|
||||
days, with all times in UTC.
|
||||
such as automatic major upgrades or master pod migration are allowed to happen.
|
||||
Accepted formats are "01:00-06:00" for daily maintenance windows or
|
||||
"Sat:00:00-04:00" for specific days, with all times in UTC. Note, when the
|
||||
global config option `enable_maintenance_windows` is false, the specified
|
||||
windows will be ignored.
|
||||
|
||||
* **users**
|
||||
a map of usernames to user flags for the users that should be created in the
|
||||
|
|
@ -457,22 +459,31 @@ under the `clone` top-level key and do not affect the already running cluster.
|
|||
|
||||
On startup, an existing `standby` top-level key creates a standby Postgres
|
||||
cluster streaming from a remote location - either from a S3 or GCS WAL
|
||||
archive or a remote primary. Only one of options is allowed and required
|
||||
if the `standby` key is present.
|
||||
archive, a remote primary, or a combination of both. At least one of
|
||||
`s3_wal_path`, `gs_wal_path`, or `standby_host` must be specified.
|
||||
Note that `s3_wal_path` and `gs_wal_path` are mutually exclusive.
|
||||
|
||||
* **s3_wal_path**
|
||||
the url to S3 bucket containing the WAL archive of the remote primary.
|
||||
Can be combined with `standby_host` for additional redundancy.
|
||||
|
||||
* **gs_wal_path**
|
||||
the url to GS bucket containing the WAL archive of the remote primary.
|
||||
Can be combined with `standby_host` for additional redundancy.
|
||||
|
||||
* **standby_host**
|
||||
hostname or IP address of the primary to stream from.
|
||||
Can be specified alone or combined with either `s3_wal_path` or `gs_wal_path`.
|
||||
|
||||
* **standby_port**
|
||||
TCP port on which the primary is listening for connections. Patroni will
|
||||
use `"5432"` if not set.
|
||||
|
||||
* **standby_primary_slot_name**
|
||||
name of the replication slot to use on the primary server when streaming
|
||||
from a remote primary. See the Patroni documentation
|
||||
[here](https://patroni.readthedocs.io/en/latest/standby_cluster.html) for more details. Optional.
|
||||
|
||||
## Volume properties
|
||||
|
||||
Those parameters are grouped under the `volume` top-level key and define the
|
||||
|
|
@ -638,7 +649,7 @@ the global configuration before adding the `tls` section'.
|
|||
## Change data capture streams
|
||||
|
||||
This sections enables change data capture (CDC) streams via Postgres'
|
||||
[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html)
|
||||
[logical decoding](https://www.postgresql.org/docs/18/logicaldecoding.html)
|
||||
feature and `pgoutput` plugin. While the Postgres operator takes responsibility
|
||||
for providing the setup to publish change events, it relies on external tools
|
||||
to consume them. At Zalando, we are using a workflow based on
|
||||
|
|
@ -671,7 +682,7 @@ can have the following properties:
|
|||
The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
|
||||
The application is responsible for putting events into a (JSON/B or VARCHAR)
|
||||
payload column of the outbox table in the structure of the specified target
|
||||
event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html)
|
||||
event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/18/logical-replication-publication.html)
|
||||
in Postgres for all tables specified for one `database` and `applicationId`.
|
||||
The CDC operator will consume from it shortly after transactions are
|
||||
committed to the outbox table. The `idColumn` will be used in telemetry for
|
||||
|
|
|
|||
|
|
@ -107,8 +107,13 @@ Those are top-level keys, containing both leaf keys and groups.
|
|||
* **kubernetes_use_configmaps**
|
||||
Select if setup uses endpoints (default), or configmaps to manage leader when
|
||||
DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to
|
||||
use endpoints option, and configmaps is required. By default,
|
||||
`kubernetes_use_configmaps: false`, meaning endpoints will be used.
|
||||
use endpoints option, and configmaps is required. Starting with K8s 1.33,
|
||||
endpoints are marked as deprecated. It's recommended to switch to config maps
|
||||
instead. But before doing so, make sure you scale the Postgres cluster down to just
|
||||
one primary pod (e.g. using `max_instances` option). Otherwise, you risk
|
||||
running into a split-brain scenario.
|
||||
By default, `kubernetes_use_configmaps: false`, meaning endpoints will be used.
|
||||
Starting from v1.16.0 the default will be changed to `true`.
|
||||
|
||||
* **docker_image**
|
||||
Spilo Docker image for Postgres instances. For production, don't rely on the
|
||||
|
|
@ -158,7 +163,26 @@ Those are top-level keys, containing both leaf keys and groups.
|
|||
for some clusters it might be required to scale beyond the limits that can be
|
||||
configured with `min_instances` and `max_instances` options. You can define
|
||||
an annotation key that can be used as a toggle in cluster manifests to ignore
|
||||
globally configured instance limits. The default is empty.
|
||||
globally configured instance limits. The value must be `"true"` to be
|
||||
effective. The default is empty which means the feature is disabled.
|
||||
|
||||
* **ignore_resources_limits_annotation_key**
|
||||
for some clusters it might be required to request resources beyond the globally
|
||||
configured thresholds for maximum requests and minimum limits. You can define
|
||||
an annotation key that can be used as a toggle in cluster manifests to ignore
|
||||
the thresholds. The value must be `"true"` to be effective. The default is empty
|
||||
which means the feature is disabled.
|
||||
|
||||
* **enable_maintenance_windows**
|
||||
toggle for using the maintenance windows feature. Default is `"true"`.
|
||||
|
||||
* **maintenance_windows**
|
||||
a list which defines specific time frames when certain maintenance
|
||||
operations such as automatic major upgrades or master pod migration are
|
||||
allowed to happen for all database clusters. Accepted formats are
|
||||
"01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for
|
||||
specific days, with all times in UTC. Locally defined maintenance
|
||||
windows take precedence over globally configured ones.
|
||||
|
||||
* **resync_period**
|
||||
period between consecutive sync requests. The default is `30m`.
|
||||
|
|
@ -247,12 +271,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.
|
|||
|
||||
* **minimal_major_version**
|
||||
The minimal Postgres major version that will not automatically be upgraded
|
||||
when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`.
|
||||
when `major_version_upgrade_mode` is set to `"full"`. The default is `"14"`.
|
||||
|
||||
* **target_major_version**
|
||||
The target Postgres major version when upgrading clusters automatically
|
||||
which violate the configured allowed `minimal_major_version` when
|
||||
`major_version_upgrade_mode` is set to `"full"`. The default is `"17"`.
|
||||
`major_version_upgrade_mode` is set to `"full"`. The default is `"18"`.
|
||||
|
||||
## Kubernetes resources
|
||||
|
||||
|
|
@ -819,7 +843,7 @@ grouped under the `logical_backup` key.
|
|||
runs `pg_dumpall` on a replica if possible and uploads compressed results to
|
||||
an S3 bucket under the key `/<configured-s3-bucket-prefix>/<pg_cluster_name>/<cluster_k8s_uuid>/logical_backups`.
|
||||
The default image is the same image built with the Zalando-internal CI
|
||||
pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
|
||||
pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
|
||||
* **logical_backup_google_application_credentials**
|
||||
Specifies the path of the google cloud service account json file. Default is empty.
|
||||
|
|
@ -876,6 +900,19 @@ grouped under the `logical_backup` key.
|
|||
* **logical_backup_cronjob_environment_secret**
|
||||
Reference to a Kubernetes secret, which keys will be added as environment variables to the cronjob. Default: ""
|
||||
|
||||
The following environment variables can be passed to the logical backup
|
||||
cronjob via `logical_backup_cronjob_environment_secret` to control
|
||||
connectivity checks before the backup starts:
|
||||
|
||||
* **LOGICAL_BACKUP_CONNECT_RETRIES**
|
||||
Number of times to retry connecting to the target PostgreSQL pod before
|
||||
giving up. This is useful when NetworkPolicy enforcement introduces a
|
||||
short delay before a newly-created pod's IP is allowed through ingress
|
||||
rules on the destination node. Default: "10"
|
||||
|
||||
* **LOGICAL_BACKUP_CONNECT_RETRY_DELAY**
|
||||
Delay in seconds between connectivity retries. Default: "2"
|
||||
|
||||
## Debugging the operator
|
||||
|
||||
Options to aid debugging of the operator itself. Grouped under the `debug` key.
|
||||
|
|
|
|||
33
docs/user.md
33
docs/user.md
|
|
@ -30,7 +30,7 @@ spec:
|
|||
databases:
|
||||
foo: zalando
|
||||
postgresql:
|
||||
version: "17"
|
||||
version: "18"
|
||||
```
|
||||
|
||||
Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
|
||||
|
|
@ -109,7 +109,7 @@ metadata:
|
|||
spec:
|
||||
[...]
|
||||
postgresql:
|
||||
version: "17"
|
||||
version: "18"
|
||||
parameters:
|
||||
password_encryption: scram-sha-256
|
||||
```
|
||||
|
|
@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:
|
|||
|
||||
The `<dbname>_owner` role is the database owner and should be used when creating
|
||||
new database objects. All members of the `admin` role, e.g. teams API roles, can
|
||||
become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html)
|
||||
become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/18/sql-alterdefaultprivileges.html)
|
||||
are configured for the owner role so that the `<dbname>_reader` role
|
||||
automatically gets read-access (SELECT) to new tables and sequences and the
|
||||
`<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
|
||||
|
|
@ -594,7 +594,7 @@ spec:
|
|||
|
||||
### Schema `search_path` for default roles
|
||||
|
||||
The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH)
|
||||
The schema [`search_path`](https://www.postgresql.org/docs/18/ddl-schemas.html#DDL-SCHEMAS-PATH)
|
||||
for each role will include the role name and the schemas, this role should have
|
||||
access to. So `foo_bar_writer` does not have to schema-qualify tables from
|
||||
schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and
|
||||
|
|
@ -695,7 +695,7 @@ handle it.
|
|||
|
||||
### HugePages support
|
||||
|
||||
The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES).
|
||||
The operator supports [HugePages](https://www.postgresql.org/docs/18/kernel-resources.html#LINUX-HUGEPAGES).
|
||||
To enable HugePages, set the matching resource requests and/or limits in the manifest:
|
||||
|
||||
```yaml
|
||||
|
|
@ -757,7 +757,7 @@ If you need to define a `nodeAffinity` for all your Postgres clusters use the
|
|||
|
||||
## In-place major version upgrade
|
||||
|
||||
Starting with Spilo 13, operator supports in-place major version upgrade to a
|
||||
Starting with Spilo 14, operator supports in-place major version upgrade to a
|
||||
higher major version (e.g. from PG 14 to PG 16). To trigger the upgrade,
|
||||
simply increase the version in the manifest. It is your responsibility to test
|
||||
your applications against the new version before the upgrade; downgrading is
|
||||
|
|
@ -792,7 +792,7 @@ spec:
|
|||
clone:
|
||||
uid: "efd12e58-5786-11e8-b5a7-06148230260c"
|
||||
cluster: "acid-minimal-cluster"
|
||||
timestamp: "2017-12-19T12:40:33+01:00"
|
||||
timestamp: "2025-12-19T12:40:33+01:00"
|
||||
```
|
||||
|
||||
Here `cluster` is a name of a source cluster that is going to be cloned. A new
|
||||
|
|
@ -827,7 +827,7 @@ spec:
|
|||
clone:
|
||||
uid: "efd12e58-5786-11e8-b5a7-06148230260c"
|
||||
cluster: "acid-minimal-cluster"
|
||||
timestamp: "2017-12-19T12:40:33+01:00"
|
||||
timestamp: "2025-12-19T12:40:33+01:00"
|
||||
s3_wal_path: "s3://custom/path/to/bucket"
|
||||
s3_endpoint: https://s3.acme.org
|
||||
s3_access_key_id: 0123456789abcdef0123456789abcdef
|
||||
|
|
@ -838,7 +838,7 @@ spec:
|
|||
### Clone directly
|
||||
|
||||
Another way to get a fresh copy of your source DB cluster is via
|
||||
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To
|
||||
[pg_basebackup](https://www.postgresql.org/docs/18/app-pgbasebackup.html). To
|
||||
use this feature simply leave out the timestamp field from the clone section.
|
||||
The operator will connect to the service of the source cluster by name. If the
|
||||
cluster is called test, then the connection string will look like host=test
|
||||
|
|
@ -900,8 +900,9 @@ the PostgreSQL version between source and target cluster has to be the same.
|
|||
|
||||
To start a cluster as standby, add the following `standby` section in the YAML
|
||||
file. You can stream changes from archived WAL files (AWS S3 or Google Cloud
|
||||
Storage) or from a remote primary. Only one option can be specified in the
|
||||
manifest:
|
||||
Storage), from a remote primary, or combine a remote primary with a WAL archive.
|
||||
At least one of `s3_wal_path`, `gs_wal_path`, or `standby_host` must be specified.
|
||||
Note that `s3_wal_path` and `gs_wal_path` are mutually exclusive.
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
|
|
@ -929,6 +930,16 @@ spec:
|
|||
standby_port: "5433"
|
||||
```
|
||||
|
||||
You can also combine a remote primary with a WAL archive for additional redundancy:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
standby:
|
||||
standby_host: "acid-minimal-cluster.default"
|
||||
standby_port: "5433"
|
||||
s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
|
||||
```
|
||||
|
||||
Note, that the pods and services use the same role labels like for normal clusters:
|
||||
The standby leader is labeled as `master`. When using the `standby_host` option
|
||||
you have to copy the credentials from the source cluster's secrets to successfully
|
||||
|
|
|
|||
|
|
@ -1,27 +1,20 @@
|
|||
# An image to run e2e tests.
|
||||
# The image does not include the tests; all necessary files are bind-mounted when a container starts.
|
||||
FROM ubuntu:20.04
|
||||
FROM python:3.11-slim
|
||||
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
|
||||
|
||||
ENV TERM xterm-256color
|
||||
|
||||
COPY requirements.txt ./
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install --no-install-recommends -y \
|
||||
python3 \
|
||||
python3-setuptools \
|
||||
python3-pip \
|
||||
curl \
|
||||
vim \
|
||||
&& pip3 install --no-cache-dir -r requirements.txt \
|
||||
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
|
||||
RUN apt-get -qq -y update \
|
||||
# https://www.psycopg.org/docs/install.html#psycopg-vs-psycopg-binary
|
||||
&& apt-get -qq -y install --no-install-recommends curl vim python3-dev \
|
||||
&& curl -LO https://dl.k8s.io/release/v1.32.9/bin/linux/amd64/kubectl \
|
||||
&& chmod +x ./kubectl \
|
||||
&& mv ./kubectl /usr/local/bin/kubectl \
|
||||
&& apt-get clean \
|
||||
&& apt-get -qq -y clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# working line
|
||||
# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests
|
||||
ENTRYPOINT ["python3", "-m", "unittest"]
|
||||
CMD ["discover","-v","--failfast","--start-directory","/tests"]
|
||||
COPY requirements.txt ./
|
||||
RUN pip install -r ./requirements.txt
|
||||
|
||||
CMD ["python", "-m", "unittest", "discover", "-v", "--failfast", "--start-directory", "/tests"]
|
||||
|
|
@ -11,7 +11,7 @@ endif
|
|||
LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
|
||||
LDFLAGS ?= -X=main.version=$(VERSION)
|
||||
|
||||
IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY)
|
||||
IMAGE ?= ghcr.io/zalando/$(BINARY)
|
||||
VERSION ?= $(shell git describe --tags --always --dirty)
|
||||
TAG ?= $(VERSION)
|
||||
GITHEAD = $(shell git rev-parse --short HEAD)
|
||||
|
|
@ -46,7 +46,7 @@ tools:
|
|||
# install pinned version of 'kind'
|
||||
# go install must run outside of a dir with a (module-based) Go project !
|
||||
# otherwise go install updates project's dependencies and/or behaves differently
|
||||
cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0
|
||||
cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.27.0
|
||||
|
||||
e2etest: tools copy clean
|
||||
./run.sh main
|
||||
|
|
|
|||
|
|
@ -2,8 +2,8 @@
|
|||
|
||||
export cluster_name="postgres-operator-e2e-tests"
|
||||
export kubeconfig_path="/tmp/kind-config-${cluster_name}"
|
||||
export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest"
|
||||
export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"
|
||||
export operator_image="ghcr.io/zalando/postgres-operator:latest"
|
||||
export e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest"
|
||||
|
||||
docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \
|
||||
--mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
kubernetes==29.2.0
|
||||
kubernetes==31.0.0
|
||||
timeout_decorator==0.5.0
|
||||
pyyaml==6.0.1
|
||||
pyyaml==6.0.3
|
||||
|
|
|
|||
31
e2e/run.sh
31
e2e/run.sh
|
|
@ -7,23 +7,36 @@ set -o pipefail
|
|||
IFS=$'\n\t'
|
||||
|
||||
readonly cluster_name="postgres-operator-e2e-tests"
|
||||
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
|
||||
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
|
||||
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"
|
||||
readonly kubeconfig_path="${HOME}/kind-config-${cluster_name}"
|
||||
readonly spilo_image="ghcr.io/zalando/spilo-18:4.1-p1"
|
||||
readonly e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest"
|
||||
|
||||
export GOPATH=${GOPATH-~/go}
|
||||
export PATH=${GOPATH}/bin:$PATH
|
||||
|
||||
# detect system architecture for pulling the correct Spilo image
|
||||
case "$(uname -m)" in
|
||||
x86_64) readonly PLATFORM="linux/amd64" ;;
|
||||
aarch64|arm64) readonly PLATFORM="linux/arm64" ;;
|
||||
*) echo "Unsupported architecture: $(uname -m)"; exit 1 ;;
|
||||
esac
|
||||
|
||||
echo "Clustername: ${cluster_name}"
|
||||
echo "Kubeconfig path: ${kubeconfig_path}"
|
||||
|
||||
function pull_images(){
|
||||
operator_tag=$(git describe --tags --always --dirty)
|
||||
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
|
||||
image_name="ghcr.io/zalando/postgres-operator:${operator_tag}"
|
||||
if [[ -z $(docker images -q "${image_name}") ]]
|
||||
then
|
||||
docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
|
||||
if ! docker pull "${image_name}"
|
||||
then
|
||||
echo "Failed to pull operator image: ${image_name}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
|
||||
operator_image="${image_name}"
|
||||
echo "Using operator image: ${operator_image}"
|
||||
}
|
||||
|
||||
function start_kind(){
|
||||
|
|
@ -36,7 +49,9 @@ function start_kind(){
|
|||
|
||||
export KUBECONFIG="${kubeconfig_path}"
|
||||
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
|
||||
docker pull "${spilo_image}"
|
||||
|
||||
echo "Pulling Spilo image for platform ${PLATFORM}"
|
||||
docker pull --platform ${PLATFORM} "${spilo_image}"
|
||||
kind load docker-image "${spilo_image}" --name ${cluster_name}
|
||||
}
|
||||
|
||||
|
|
@ -52,7 +67,7 @@ function set_kind_api_server_ip(){
|
|||
# but update the IP address of the API server to the one from the Docker 'bridge' network
|
||||
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
|
||||
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
|
||||
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
|
||||
sed "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" > "${kubeconfig_path}".tmp && mv "${kubeconfig_path}".tmp "${kubeconfig_path}"
|
||||
}
|
||||
|
||||
function generate_certificate(){
|
||||
|
|
|
|||
|
|
@ -12,9 +12,9 @@ from kubernetes import client
|
|||
from tests.k8s_api import K8s
|
||||
from kubernetes.client.rest import ApiException
|
||||
|
||||
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
|
||||
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4"
|
||||
SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2"
|
||||
SPILO_CURRENT = "ghcr.io/zalando/spilo-e2e:dev-18.3"
|
||||
SPILO_LAZY = "ghcr.io/zalando/spilo-e2e:dev-18.4"
|
||||
SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-18:4.1-p1"
|
||||
|
||||
def to_selector(labels):
|
||||
return ",".join(["=".join(lbl) for lbl in labels.items()])
|
||||
|
|
@ -151,6 +151,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
'default', label_selector='name=postgres-operator').items[0].spec.containers[0].image
|
||||
print("Tested operator image: {}".format(actual_operator_image)) # shows up after tests finish
|
||||
|
||||
# load minimal Postgres manifest and wait for cluster to be up and running
|
||||
result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
|
||||
print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr))
|
||||
try:
|
||||
|
|
@ -723,14 +724,12 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
|
||||
master_annotations = {
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-minimal-cluster-pooler.default.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
}
|
||||
self.eventuallyTrue(lambda: k8s.check_service_annotations(
|
||||
master_pooler_label+","+pooler_label, master_annotations), "Wrong annotations")
|
||||
|
||||
replica_annotations = {
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-minimal-cluster-pooler-repl.default.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
}
|
||||
self.eventuallyTrue(lambda: k8s.check_service_annotations(
|
||||
replica_pooler_label+","+pooler_label, replica_annotations), "Wrong annotations")
|
||||
|
|
@ -1003,7 +1002,8 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
"Origin": 2,
|
||||
"IsDbOwner": False,
|
||||
"Deleted": False,
|
||||
"Rotated": False
|
||||
"Rotated": False,
|
||||
"Degraded": False,
|
||||
})
|
||||
return True
|
||||
except:
|
||||
|
|
@ -1210,25 +1210,25 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml")
|
||||
self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running")
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
self.eventuallyEqual(check_version, 13, "Version is not correct")
|
||||
self.eventuallyEqual(check_version, 14, "Version is not correct")
|
||||
|
||||
master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label)
|
||||
# should upgrade immediately
|
||||
pg_patch_version_14 = {
|
||||
pg_patch_version_higher_version = {
|
||||
"spec": {
|
||||
"postgresql": {
|
||||
"version": "14"
|
||||
"version": "15"
|
||||
}
|
||||
}
|
||||
}
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_higher_version)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
|
||||
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
||||
self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14")
|
||||
self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15")
|
||||
|
||||
# check if annotation for last upgrade's success is set
|
||||
annotations = get_annotations()
|
||||
|
|
@ -1237,10 +1237,10 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
# should not upgrade because current time is not in maintenanceWindow
|
||||
current_time = datetime.now()
|
||||
maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}"
|
||||
pg_patch_version_15_outside_mw = {
|
||||
pg_patch_version_higher_version_outside_mw = {
|
||||
"spec": {
|
||||
"postgresql": {
|
||||
"version": "15"
|
||||
"version": "16"
|
||||
},
|
||||
"maintenanceWindows": [
|
||||
maintenance_window_future
|
||||
|
|
@ -1248,23 +1248,23 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
}
|
||||
}
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_outside_mw)
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_higher_version_outside_mw)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
|
||||
# no pod replacement outside of the maintenance window
|
||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
||||
self.eventuallyEqual(check_version, 14, "Version should not be upgraded")
|
||||
self.eventuallyEqual(check_version, 15, "Version should not be upgraded")
|
||||
|
||||
second_annotations = get_annotations()
|
||||
self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set")
|
||||
|
||||
# change maintenanceWindows to current
|
||||
maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}"
|
||||
pg_patch_version_15_in_mw = {
|
||||
pg_patch_version_higher_version_in_mw = {
|
||||
"spec": {
|
||||
"postgresql": {
|
||||
"version": "15"
|
||||
"version": "16"
|
||||
},
|
||||
"maintenanceWindows": [
|
||||
maintenance_window_current
|
||||
|
|
@ -1273,13 +1273,13 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
}
|
||||
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw)
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_higher_version_in_mw)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
|
||||
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
||||
self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15")
|
||||
self.eventuallyEqual(check_version, 16, "Version should be upgraded from 15 to 16")
|
||||
|
||||
# check if annotation for last upgrade's success is updated after second upgrade
|
||||
third_annotations = get_annotations()
|
||||
|
|
@ -1287,7 +1287,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated")
|
||||
|
||||
# test upgrade with failed upgrade annotation
|
||||
pg_patch_version_17 = {
|
||||
pg_patch_version_highest_version = {
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"last-major-upgrade-failure": "2024-01-02T15:04:05Z"
|
||||
|
|
@ -1295,28 +1295,28 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
},
|
||||
"spec": {
|
||||
"postgresql": {
|
||||
"version": "17"
|
||||
"version": "18"
|
||||
},
|
||||
},
|
||||
}
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17)
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_highest_version)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
|
||||
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
||||
self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set")
|
||||
self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set")
|
||||
|
||||
# change the version back to 15 and should remove failure annotation
|
||||
# change the version back to 16 and should remove failure annotation
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw)
|
||||
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_higher_version_in_mw)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
|
||||
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
|
||||
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
|
||||
|
||||
self.eventuallyEqual(check_version, 15, "Version should not be upgraded from 15")
|
||||
self.eventuallyEqual(check_version, 16, "Version should not be upgraded from 16")
|
||||
fourth_annotations = get_annotations()
|
||||
self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed")
|
||||
|
||||
|
|
|
|||
122
go.mod
122
go.mod
|
|
@ -1,75 +1,91 @@
|
|||
module github.com/zalando/postgres-operator
|
||||
|
||||
go 1.23.4
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go v1.53.8
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/r3labs/diff v1.1.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/stretchr/testify v1.9.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.30.4
|
||||
k8s.io/apiextensions-apiserver v0.25.9
|
||||
k8s.io/apimachinery v0.30.4
|
||||
k8s.io/client-go v0.30.4
|
||||
k8s.io/code-generator v0.25.9
|
||||
)
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.8
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/lib/pq v1.11.2
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/r3labs/diff v1.1.0
|
||||
github.com/sirupsen/logrus v1.9.4
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.45.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.32.9
|
||||
k8s.io/apiextensions-apiserver v0.32.9
|
||||
k8s.io/apimachinery v0.32.9
|
||||
k8s.io/client-go v0.32.9
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gobuffalo/flect v1.0.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.10.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/spf13/cobra v1.9.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/code-generator v0.32.9 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/controller-tools v0.17.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
)
|
||||
|
||||
tool (
|
||||
github.com/golang/mock/mockgen
|
||||
k8s.io/code-generator
|
||||
k8s.io/code-generator/cmd/client-gen
|
||||
k8s.io/code-generator/cmd/deepcopy-gen
|
||||
k8s.io/code-generator/cmd/informer-gen
|
||||
k8s.io/code-generator/cmd/lister-gen
|
||||
sigs.k8s.io/controller-tools/cmd/controller-gen
|
||||
)
|
||||
|
|
|
|||
230
go.sum
230
go.sum
|
|
@ -2,55 +2,58 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q
|
|||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k=
|
||||
github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
|
||||
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
|
||||
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
|
|
@ -61,7 +64,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
|||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
|
|
@ -71,10 +73,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs=
|
||||
github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
|
@ -86,22 +95,32 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
|||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
|
||||
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
|
||||
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=
|
||||
github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
|
||||
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
|
|
@ -111,39 +130,40 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
|
||||
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -151,40 +171,45 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
|
||||
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
|
|
@ -192,31 +217,32 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
|
||||
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
|
||||
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
|
||||
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
|
||||
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
|
||||
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
|
||||
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
|
||||
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
|
||||
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI=
|
||||
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y=
|
||||
k8s.io/apiextensions-apiserver v0.32.9 h1:tpT1dUgWqEsTyrdoGckyw8OBASW1JfU08tHGaYBzFHY=
|
||||
k8s.io/apiextensions-apiserver v0.32.9/go.mod h1:FoCi4zCLK67LNCCssFa2Wr9q4Xbvjx7MW4tdze5tpoA=
|
||||
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs=
|
||||
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c=
|
||||
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI=
|
||||
k8s.io/code-generator v0.32.9 h1:F9Gti/8I+nVNnQw02J36/YlSD5JMg4qDJ7sfRqpUICU=
|
||||
k8s.io/code-generator v0.32.9/go.mod h1:fLYBG9g52EJulRebmomL0vCU0PQeMr7mnscfZtAAGV4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw=
|
||||
sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
|||
|
|
@ -0,0 +1,24 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Hack to adjust the generated postgresql CRD YAML file and add missing field
|
||||
# settings which can not be expressed via kubebuilder markers.
|
||||
#
|
||||
# Injections:
|
||||
#
|
||||
# * oneOf: for the standby field to enforce validation rules:
|
||||
# - s3_wal_path and gs_wal_path are mutually exclusive
|
||||
# - standby_host can be specified alone or with either s3_wal_path OR gs_wal_path
|
||||
# - at least one of s3_wal_path, gs_wal_path, or standby_host must be set
|
||||
# * type: string and pattern for the maintenanceWindows items.
|
||||
|
||||
file="${1:-"manifests/postgresql.crd.yaml"}"
|
||||
|
||||
sed -i '/^[[:space:]]*standby:$/{
|
||||
# Capture the indentation
|
||||
s/^\([[:space:]]*\)standby:$/\1standby:\n\1 anyOf:\n\1 - required:\n\1 - s3_wal_path\n\1 - required:\n\1 - gs_wal_path\n\1 - required:\n\1 - standby_host\n\1 not:\n\1 required:\n\1 - s3_wal_path\n\1 - gs_wal_path/
|
||||
}' "$file"
|
||||
|
||||
sed -i '/^[[:space:]]*maintenanceWindows:$/{
|
||||
# Capture the indentation
|
||||
s/^\([[:space:]]*\)maintenanceWindows:$/\1maintenanceWindows:\n\1 items:\n\1 pattern: '\''^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$'\''\n\1 type: string/
|
||||
}' "$file"
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
// +build tools
|
||||
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This package imports things required by build scripts, to force `go mod` to see them as dependencies
|
||||
package tools
|
||||
|
||||
import _ "k8s.io/code-generator"
|
||||
|
|
@ -1,26 +1,67 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
GENERATED_PACKAGE_ROOT="github.com"
|
||||
OPERATOR_PACKAGE_ROOT="${GENERATED_PACKAGE_ROOT}/zalando/postgres-operator"
|
||||
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
TARGET_CODE_DIR=${1-${SCRIPT_ROOT}/pkg}
|
||||
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo "${GOPATH}"/src/k8s.io/code-generator)}
|
||||
SRC="github.com"
|
||||
GOPKG="$SRC/zalando/postgres-operator"
|
||||
CUSTOM_RESOURCE_NAME_ZAL="zalando.org"
|
||||
CUSTOM_RESOURCE_NAME_ACID="acid.zalan.do"
|
||||
CUSTOM_RESOURCE_VERSION="v1"
|
||||
|
||||
cleanup() {
|
||||
rm -rf "${GENERATED_PACKAGE_ROOT}"
|
||||
}
|
||||
trap "cleanup" EXIT SIGINT
|
||||
SCRIPT_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
|
||||
|
||||
bash "${CODEGEN_PKG}/generate-groups.sh" client,deepcopy,informer,lister \
|
||||
"${OPERATOR_PACKAGE_ROOT}/pkg/generated" "${OPERATOR_PACKAGE_ROOT}/pkg/apis" \
|
||||
"acid.zalan.do:v1 zalando.org:v1" \
|
||||
--go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt \
|
||||
-o ./
|
||||
OUTPUT_DIR="pkg/generated"
|
||||
OUTPUT_PKG="${GOPKG}/${OUTPUT_DIR}"
|
||||
APIS_PKG="${GOPKG}/pkg/apis"
|
||||
GROUPS_WITH_VERSIONS="${CUSTOM_RESOURCE_NAME_ZAL}:${CUSTOM_RESOURCE_VERSION},${CUSTOM_RESOURCE_NAME_ACID}:${CUSTOM_RESOURCE_VERSION}"
|
||||
|
||||
cp -r "${OPERATOR_PACKAGE_ROOT}"/pkg/* "${TARGET_CODE_DIR}"
|
||||
echo "Generating deepcopy funcs"
|
||||
go tool deepcopy-gen \
|
||||
--output-file zz_generated.deepcopy.go \
|
||||
--bounding-dirs "${APIS_PKG}" \
|
||||
--go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
|
||||
|
||||
cleanup
|
||||
echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
|
||||
go tool client-gen \
|
||||
--clientset-name versioned \
|
||||
--input-base "${APIS_PKG}" \
|
||||
--input "${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION},${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}" \
|
||||
--output-pkg "${OUTPUT_PKG}/clientset" \
|
||||
--go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
|
||||
--output-dir "${OUTPUT_DIR}/clientset"
|
||||
|
||||
echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
|
||||
go tool lister-gen \
|
||||
--output-pkg "${OUTPUT_PKG}/listers" \
|
||||
--go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
|
||||
--output-dir "${OUTPUT_DIR}/listers" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
|
||||
|
||||
echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
|
||||
go tool informer-gen \
|
||||
--versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \
|
||||
--listers-package "${OUTPUT_PKG}/listers" \
|
||||
--output-pkg "${OUTPUT_PKG}/informers" \
|
||||
--go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
|
||||
--output-dir "${OUTPUT_DIR}/informers" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
|
||||
"${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ THE SOFTWARE.
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
user "os/user"
|
||||
|
|
@ -121,7 +122,7 @@ func connect(clusterName string, master bool, replica string, psql bool, user st
|
|||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = exec.Stream(remotecommand.StreamOptions{
|
||||
err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ func version(namespace string) {
|
|||
|
||||
operatorDeployment := getPostgresOperator(client)
|
||||
if operatorDeployment.Name == "" {
|
||||
log.Fatal("make sure zalando's postgres operator is running")
|
||||
log.Fatalf("make sure zalando's postgres operator is running in namespace %s", namespace)
|
||||
}
|
||||
operatorImage := operatorDeployment.Spec.Template.Spec.Containers[0].Image
|
||||
imageDetails := strings.Split(operatorImage, ":")
|
||||
|
|
|
|||
|
|
@ -1,74 +1,72 @@
|
|||
module github.com/zalando/postgres-operator/kubectl-pg
|
||||
|
||||
go 1.23.4
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/viper v1.19.0
|
||||
github.com/zalando/postgres-operator v1.13.0
|
||||
k8s.io/api v0.30.4
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/spf13/viper v1.21.0
|
||||
github.com/zalando/postgres-operator v1.15.0
|
||||
k8s.io/api v0.32.9
|
||||
k8s.io/apiextensions-apiserver v0.25.9
|
||||
k8s.io/apimachinery v0.30.4
|
||||
k8s.io/client-go v0.30.4
|
||||
k8s.io/apimachinery v0.32.9
|
||||
k8s.io/client-go v0.32.9
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/afero v1.15.0 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
|
@ -10,44 +10,42 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER
|
|||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
|
|
@ -63,14 +61,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
|
@ -82,151 +76,131 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
|
|||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
|
||||
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
|
||||
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
|
||||
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
|
||||
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
|
||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zalando/postgres-operator v1.13.0 h1:T9Mb+ZRQyTxXbagIK66GLVGCwM3661aX2lOkNpax4s8=
|
||||
github.com/zalando/postgres-operator v1.13.0/go.mod h1:WiMEKzUny2lJHYle+7+D/5BhlvPn8prl76rEDYLsQAg=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
github.com/zalando/postgres-operator v1.15.0 h1:is/7cOrpuV7OwMiN7TG7GgiYHKvaWx8Ptw3hJruFO1I=
|
||||
github.com/zalando/postgres-operator v1.15.0/go.mod h1:1cSOA5dG2dEqdG0uami1RHTGYX92bgAKYASfAhuMtHE=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs=
|
||||
k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0=
|
||||
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI=
|
||||
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y=
|
||||
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
|
||||
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
|
||||
k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY=
|
||||
k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||
k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY=
|
||||
k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc=
|
||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs=
|
||||
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c=
|
||||
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
|||
|
|
@ -25,11 +25,11 @@ RUN apt-get update \
|
|||
&& curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
|
||||
&& apt-get update \
|
||||
&& apt-get install --no-install-recommends -y \
|
||||
postgresql-client-18 \
|
||||
postgresql-client-17 \
|
||||
postgresql-client-16 \
|
||||
postgresql-client-15 \
|
||||
postgresql-client-14 \
|
||||
postgresql-client-13 \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
|
|
|||
|
|
@ -122,7 +122,21 @@ function aws_upload {
|
|||
function gcs_upload {
|
||||
PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
|
||||
|
||||
gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP"
|
||||
#Set local LOGICAL_GOOGLE_APPLICATION_CREDENTIALS to nothing or
|
||||
#value of LOGICAL_GOOGLE_APPLICATION_CREDENTIALS env var. Needed
|
||||
#because `set -o nounset` is globally set
|
||||
local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS=${LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS:-}
|
||||
|
||||
GSUTIL_OPTIONS=("-o" "Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS")
|
||||
|
||||
#If GOOGLE_APPLICATION_CREDENTIALS is not set try to get
|
||||
#creds from metadata
|
||||
if [[ -z $LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS ]]
|
||||
then
|
||||
GSUTIL_OPTIONS[1]="GoogleCompute:service_account=default"
|
||||
fi
|
||||
|
||||
gsutil ${GSUTIL_OPTIONS[@]} cp - "$PATH_TO_BACKUP"
|
||||
}
|
||||
|
||||
function upload {
|
||||
|
|
@ -169,6 +183,25 @@ function get_master_pod {
|
|||
get_pods "labelSelector=${CLUSTER_NAME_LABEL}%3D${SCOPE},spilo-role%3Dmaster" | tee | head -n 1
|
||||
}
|
||||
|
||||
# Wait for TCP connectivity to the target PostgreSQL pod.
|
||||
# When NetworkPolicy is enforced via iptables, a newly-created pod's IP may not
|
||||
# yet be present in the destination node's ingress allow lists, causing
|
||||
# cross-node connections to be rejected until the next policy sync.
|
||||
function wait_for_pg {
|
||||
local retries=${LOGICAL_BACKUP_CONNECT_RETRIES:-10}
|
||||
local delay=${LOGICAL_BACKUP_CONNECT_RETRY_DELAY:-2}
|
||||
local i
|
||||
for (( i=1; i<=retries; i++ )); do
|
||||
if "$PG_BIN"/pg_isready -h "$PGHOST" -p "${PGPORT:-5432}" -q 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
echo "waiting for $PGHOST:${PGPORT:-5432} to become reachable (attempt $i/$retries)..."
|
||||
sleep "$delay"
|
||||
done
|
||||
echo "ERROR: $PGHOST:${PGPORT:-5432} not reachable after $((retries * delay))s"
|
||||
return 1
|
||||
}
|
||||
|
||||
CURRENT_NODENAME=$(get_current_pod | jq .items[].spec.nodeName --raw-output)
|
||||
export CURRENT_NODENAME
|
||||
|
||||
|
|
@ -183,6 +216,8 @@ for search in "${search_strategy[@]}"; do
|
|||
|
||||
done
|
||||
|
||||
wait_for_pg
|
||||
|
||||
set -x
|
||||
if [ "$LOGICAL_BACKUP_PROVIDER" == "az" ]; then
|
||||
dump | compress > /tmp/azure-backup.sql.gz
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ metadata:
|
|||
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
|
||||
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
|
||||
spec:
|
||||
dockerImage: ghcr.io/zalando/spilo-17:4.0-p2
|
||||
dockerImage: ghcr.io/zalando/spilo-18:4.1-p1
|
||||
teamId: "acid"
|
||||
numberOfInstances: 2
|
||||
users: # Application/Robot users
|
||||
|
|
@ -48,7 +48,7 @@ spec:
|
|||
defaultRoles: true
|
||||
defaultUsers: false
|
||||
postgresql:
|
||||
version: "17"
|
||||
version: "18"
|
||||
parameters: # Expert section
|
||||
shared_buffers: "32MB"
|
||||
max_connections: "10"
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ data:
|
|||
default_memory_request: 100Mi
|
||||
# delete_annotation_date_key: delete-date
|
||||
# delete_annotation_name_key: delete-clustername
|
||||
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
|
||||
docker_image: ghcr.io/zalando/spilo-18:4.1-p1
|
||||
# downscaler_annotations: "deployment-time,downscaler/*"
|
||||
enable_admin_role_for_users: "true"
|
||||
enable_crd_registration: "true"
|
||||
|
|
@ -46,6 +46,7 @@ data:
|
|||
enable_ebs_gp3_migration_max_size: "1000"
|
||||
enable_init_containers: "true"
|
||||
enable_lazy_spilo_upgrade: "false"
|
||||
enable_maintenance_windows: "true"
|
||||
enable_master_load_balancer: "false"
|
||||
enable_master_pooler_load_balancer: "false"
|
||||
enable_password_rotation: "false"
|
||||
|
|
@ -75,6 +76,7 @@ data:
|
|||
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
|
||||
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
|
||||
# ignore_instance_limits_annotation_key: ""
|
||||
# ignore_resources_limits_annotation_key: ""
|
||||
# inherited_annotations: owned-by
|
||||
# inherited_labels: application,environment
|
||||
# kube_iam_role: ""
|
||||
|
|
@ -86,7 +88,7 @@ data:
|
|||
# logical_backup_cpu_limit: ""
|
||||
# logical_backup_cpu_request: ""
|
||||
logical_backup_cronjob_environment_secret: ""
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
# logical_backup_google_application_credentials: ""
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
# logical_backup_memory_limit: ""
|
||||
|
|
@ -101,6 +103,7 @@ data:
|
|||
logical_backup_s3_sse: "AES256"
|
||||
logical_backup_s3_retention_time: ""
|
||||
logical_backup_schedule: "30 00 * * *"
|
||||
# maintenance_windows: "Sat:22:00-23:59,Sun:00:00-01:00"
|
||||
major_version_upgrade_mode: "manual"
|
||||
# major_version_upgrade_team_allow_list: ""
|
||||
master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
|
||||
|
|
@ -112,7 +115,7 @@ data:
|
|||
min_cpu_limit: 250m
|
||||
min_instances: "-1"
|
||||
min_memory_limit: 250Mi
|
||||
minimal_major_version: "13"
|
||||
minimal_major_version: "14"
|
||||
# node_readiness_label: "status:ready"
|
||||
# node_readiness_label_merge: "OR"
|
||||
oauth_token_secret_name: postgresql-operator
|
||||
|
|
@ -162,7 +165,7 @@ data:
|
|||
spilo_privileged: "false"
|
||||
storage_resize_mode: "pvc"
|
||||
super_username: postgres
|
||||
target_major_version: "17"
|
||||
target_major_version: "18"
|
||||
team_admin_role: "admin"
|
||||
team_api_role_configuration: "log_statement:all"
|
||||
teams_api_url: http://fake-teams-api.default.svc.cluster.local
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ spec:
|
|||
preparedDatabases:
|
||||
bar: {}
|
||||
postgresql:
|
||||
version: "13"
|
||||
version: "18"
|
||||
sidecars:
|
||||
- name: "exporter"
|
||||
image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0"
|
||||
|
|
|
|||
|
|
@ -17,4 +17,4 @@ spec:
|
|||
preparedDatabases:
|
||||
bar: {}
|
||||
postgresql:
|
||||
version: "13"
|
||||
version: "14"
|
||||
|
|
|
|||
|
|
@ -17,4 +17,4 @@ spec:
|
|||
preparedDatabases:
|
||||
bar: {}
|
||||
postgresql:
|
||||
version: "17"
|
||||
version: "18"
|
||||
|
|
|
|||
|
|
@ -59,13 +59,20 @@ rules:
|
|||
- get
|
||||
- patch
|
||||
- update
|
||||
# to read configuration from ConfigMaps
|
||||
# to read configuration from ConfigMaps and help Patroni manage the cluster if endpoints are not used
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
# to send events to the CRs
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
|
@ -78,7 +85,7 @@ rules:
|
|||
- patch
|
||||
- update
|
||||
- watch
|
||||
# to manage endpoints which are also used by Patroni
|
||||
# to manage endpoints which are also used by Patroni (if it is using config maps)
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
|
|
@ -249,7 +256,21 @@ kind: ClusterRole
|
|||
metadata:
|
||||
name: postgres-pod
|
||||
rules:
|
||||
# Patroni needs to watch and manage endpoints
|
||||
# Patroni needs to watch and manage config maps (or endpoints)
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
# Patroni needs to watch and manage endpoints (or config maps)
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ spec:
|
|||
type: string
|
||||
docker_image:
|
||||
type: string
|
||||
default: "ghcr.io/zalando/spilo-17:4.0-p2"
|
||||
default: "ghcr.io/zalando/spilo-18:4.1-p1"
|
||||
enable_crd_registration:
|
||||
type: boolean
|
||||
default: true
|
||||
|
|
@ -77,6 +77,9 @@ spec:
|
|||
enable_lazy_spilo_upgrade:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_maintenance_windows:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_pgversion_env_var:
|
||||
type: boolean
|
||||
default: true
|
||||
|
|
@ -94,9 +97,16 @@ spec:
|
|||
default: ""
|
||||
ignore_instance_limits_annotation_key:
|
||||
type: string
|
||||
ignore_resources_limits_annotation_key:
|
||||
type: string
|
||||
kubernetes_use_configmaps:
|
||||
type: boolean
|
||||
default: false
|
||||
maintenance_windows:
|
||||
items:
|
||||
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
|
||||
type: string
|
||||
type: array
|
||||
max_instances:
|
||||
type: integer
|
||||
description: "-1 = disabled"
|
||||
|
|
@ -165,10 +175,10 @@ spec:
|
|||
type: string
|
||||
minimal_major_version:
|
||||
type: string
|
||||
default: "13"
|
||||
default: "14"
|
||||
target_major_version:
|
||||
type: string
|
||||
default: "17"
|
||||
default: "18"
|
||||
kubernetes:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -508,7 +518,7 @@ spec:
|
|||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
logical_backup_docker_image:
|
||||
type: string
|
||||
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
|
||||
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
logical_backup_google_application_credentials:
|
||||
type: string
|
||||
logical_backup_job_prefix:
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ spec:
|
|||
serviceAccountName: postgres-operator
|
||||
containers:
|
||||
- name: postgres-operator
|
||||
image: ghcr.io/zalando/postgres-operator:v1.14.0
|
||||
image: ghcr.io/zalando/postgres-operator:v1.15.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
requests:
|
||||
|
|
|
|||
|
|
@ -3,18 +3,23 @@ kind: OperatorConfiguration
|
|||
metadata:
|
||||
name: postgresql-operator-default-configuration
|
||||
configuration:
|
||||
docker_image: ghcr.io/zalando/spilo-17:4.0-p2
|
||||
docker_image: ghcr.io/zalando/spilo-18:4.1-p1
|
||||
# enable_crd_registration: true
|
||||
# crd_categories:
|
||||
# - all
|
||||
# enable_lazy_spilo_upgrade: false
|
||||
enable_maintenance_windows: true
|
||||
enable_pgversion_env_var: true
|
||||
# enable_shm_volume: true
|
||||
enable_spilo_wal_path_compat: false
|
||||
enable_team_id_clustername_prefix: false
|
||||
etcd_host: ""
|
||||
# ignore_instance_limits_annotation_key: ""
|
||||
# ignore_resources_limits_annotation_key: ""
|
||||
# kubernetes_use_configmaps: false
|
||||
# maintenance_windows:
|
||||
# - "Sat:22:00-23:59"
|
||||
# - "Sun:00:00-01:00"
|
||||
max_instances: -1
|
||||
min_instances: -1
|
||||
resync_period: 30m
|
||||
|
|
@ -39,8 +44,8 @@ configuration:
|
|||
major_version_upgrade_mode: "manual"
|
||||
# major_version_upgrade_team_allow_list:
|
||||
# - acid
|
||||
minimal_major_version: "13"
|
||||
target_major_version: "17"
|
||||
minimal_major_version: "14"
|
||||
target_major_version: "18"
|
||||
kubernetes:
|
||||
# additional_pod_capabilities:
|
||||
# - "SYS_NICE"
|
||||
|
|
@ -168,7 +173,7 @@ configuration:
|
|||
# logical_backup_cpu_request: ""
|
||||
# logical_backup_memory_limit: ""
|
||||
# logical_backup_memory_request: ""
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"
|
||||
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"
|
||||
# logical_backup_google_application_credentials: ""
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
logical_backup_provider: "s3"
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,68 +1,82 @@
|
|||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.17.3
|
||||
name: postgresteams.acid.zalan.do
|
||||
spec:
|
||||
group: acid.zalan.do
|
||||
names:
|
||||
categories:
|
||||
- all
|
||||
kind: PostgresTeam
|
||||
listKind: PostgresTeamList
|
||||
plural: postgresteams
|
||||
singular: postgresteam
|
||||
shortNames:
|
||||
- pgteam
|
||||
categories:
|
||||
- all
|
||||
singular: postgresteam
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: PostgresTeam defines Custom Resource Definition Object for team
|
||||
management.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: PostgresTeamSpec defines the specification for the PostgresTeam
|
||||
TPR.
|
||||
properties:
|
||||
additionalMembers:
|
||||
additionalProperties:
|
||||
description: List of users who will also be added to the Postgres
|
||||
cluster.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
description: Map for teamId and associated additional users
|
||||
type: object
|
||||
additionalSuperuserTeams:
|
||||
additionalProperties:
|
||||
description: List of teams to become Postgres superusers
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
description: Map for teamId and associated additional superuser teams
|
||||
type: object
|
||||
additionalTeams:
|
||||
additionalProperties:
|
||||
description: List of teams whose members will also be added to the
|
||||
Postgres cluster.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
description: Map for teamId and associated additional teams
|
||||
type: object
|
||||
type: object
|
||||
required:
|
||||
- metadata
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
type: object
|
||||
required:
|
||||
- kind
|
||||
- apiVersion
|
||||
- spec
|
||||
properties:
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
- PostgresTeam
|
||||
apiVersion:
|
||||
type: string
|
||||
enum:
|
||||
- acid.zalan.do/v1
|
||||
spec:
|
||||
type: object
|
||||
properties:
|
||||
additionalSuperuserTeams:
|
||||
type: object
|
||||
description: "Map for teamId and associated additional superuser teams"
|
||||
additionalProperties:
|
||||
type: array
|
||||
nullable: true
|
||||
description: "List of teams to become Postgres superusers"
|
||||
items:
|
||||
type: string
|
||||
additionalTeams:
|
||||
type: object
|
||||
description: "Map for teamId and associated additional teams"
|
||||
additionalProperties:
|
||||
type: array
|
||||
nullable: true
|
||||
description: "List of teams whose members will also be added to the Postgres cluster"
|
||||
items:
|
||||
type: string
|
||||
additionalMembers:
|
||||
type: object
|
||||
description: "Map for teamId and associated additional users"
|
||||
additionalProperties:
|
||||
type: array
|
||||
nullable: true
|
||||
description: "List of users who will also be added to the Postgres cluster"
|
||||
items:
|
||||
type: string
|
||||
|
|
|
|||
|
|
@ -8,8 +8,10 @@ spec:
|
|||
size: 1Gi
|
||||
numberOfInstances: 1
|
||||
postgresql:
|
||||
version: "17"
|
||||
# Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
|
||||
version: "18"
|
||||
# Make this a standby cluster. You can specify s3_wal_path or gs_wal_path for WAL archive,
|
||||
# standby_host for remote primary streaming, or combine standby_host with either WAL path.
|
||||
# Note: s3_wal_path and gs_wal_path are mutually exclusive.
|
||||
standby:
|
||||
# s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"
|
||||
standby_host: "acid-minimal-cluster.default"
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -31,7 +31,8 @@ func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error {
|
|||
err error
|
||||
)
|
||||
|
||||
parts := strings.Split(string(data[1:len(data)-1]), "-")
|
||||
dataStr := strings.Trim(string(data), "\"")
|
||||
parts := strings.Split(dataStr, "-")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("incorrect maintenance window format")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
|
|||
type MajorVersionUpgradeConfiguration struct {
|
||||
MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
|
||||
MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
|
||||
MinimalMajorVersion string `json:"minimal_major_version" default:"13"`
|
||||
TargetMajorVersion string `json:"target_major_version" default:"17"`
|
||||
MinimalMajorVersion string `json:"minimal_major_version" default:"14"`
|
||||
TargetMajorVersion string `json:"target_major_version" default:"18"`
|
||||
}
|
||||
|
||||
// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
|
||||
|
|
@ -266,6 +266,8 @@ type OperatorConfigurationData struct {
|
|||
Workers uint32 `json:"workers,omitempty"`
|
||||
ResyncPeriod Duration `json:"resync_period,omitempty"`
|
||||
RepairPeriod Duration `json:"repair_period,omitempty"`
|
||||
EnableMaintenanceWindows *bool `json:"enable_maintenance_windows,omitempty"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenance_windows,omitempty"`
|
||||
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
|
||||
ShmVolume *bool `json:"enable_shm_volume,omitempty"`
|
||||
SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
|
||||
|
|
@ -285,9 +287,10 @@ type OperatorConfigurationData struct {
|
|||
ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"`
|
||||
Patroni PatroniConfiguration `json:"patroni"`
|
||||
|
||||
MinInstances int32 `json:"min_instances,omitempty"`
|
||||
MaxInstances int32 `json:"max_instances,omitempty"`
|
||||
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
|
||||
MinInstances int32 `json:"min_instances,omitempty"`
|
||||
MaxInstances int32 `json:"max_instances,omitempty"`
|
||||
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
|
||||
IgnoreResourcesLimitsAnnotationKey string `json:"ignore_resources_limits_annotation_key,omitempty"`
|
||||
}
|
||||
|
||||
// Duration shortens this frequently used name
|
||||
|
|
|
|||
|
|
@ -8,18 +8,33 @@ import (
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PostgresTeam defines Custom Resource Definition Object for team management.
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +kubebuilder:resource:shortName=pgteam,categories=all
|
||||
// +kubebuilder:subresource:status
|
||||
type PostgresTeam struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec PostgresTeamSpec `json:"spec"`
|
||||
}
|
||||
|
||||
// List of users who will also be added to the Postgres cluster.
|
||||
type Users []string
|
||||
|
||||
// List of teams whose members will also be added to the Postgres cluster.
|
||||
type Teams []string
|
||||
|
||||
// List of teams to become Postgres superusers
|
||||
type SuperUserTeams []string
|
||||
|
||||
// PostgresTeamSpec defines the specification for the PostgresTeam TPR.
|
||||
type PostgresTeamSpec struct {
|
||||
AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"`
|
||||
AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"`
|
||||
AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"`
|
||||
// Map for teamId and associated additional superuser teams
|
||||
AdditionalSuperuserTeams map[string]SuperUserTeams `json:"additionalSuperuserTeams,omitempty"`
|
||||
// Map for teamId and associated additional teams
|
||||
AdditionalTeams map[string]Teams `json:"additionalTeams,omitempty"`
|
||||
// Map for teamId and associated additional users
|
||||
AdditionalMembers map[string]Users `json:"additionalMembers,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -11,13 +11,25 @@ import (
|
|||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
// Postgresql defines PostgreSQL Custom Resource Definition Object.
|
||||
// +kubebuilder:resource:categories=all,shortName=pg,scope=Namespaced
|
||||
// +kubebuilder:printcolumn:name="Team",type=string,JSONPath=`.spec.teamId`,description="Team responsible for Postgres cluster"
|
||||
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.postgresql.version`,description="PostgreSQL version"
|
||||
// +kubebuilder:printcolumn:name="Pods",type=integer,JSONPath=`.spec.numberOfInstances`,description="Number of Pods per Postgres cluster"
|
||||
// +kubebuilder:printcolumn:name="Volume",type=string,JSONPath=`.spec.volume.size`,description="Size of the bound volume"
|
||||
// +kubebuilder:printcolumn:name="CPU-Request",type=string,JSONPath=`.spec.resources.requests.cpu`,description="Requested CPU for Postgres containers"
|
||||
// +kubebuilder:printcolumn:name="Memory-Request",type=string,JSONPath=`.spec.resources.requests.memory`,description="Requested memory for Postgres containers"
|
||||
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="Age of the PostgreSQL cluster"
|
||||
// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.PostgresClusterStatus`,description="Current sync status of postgresql resource"
|
||||
// +kubebuilder:subresource:status
|
||||
type Postgresql struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec PostgresSpec `json:"spec"`
|
||||
Spec PostgresSpec `json:"spec"`
|
||||
// +optional
|
||||
Status PostgresStatus `json:"status"`
|
||||
Error string `json:"-"`
|
||||
}
|
||||
|
|
@ -25,9 +37,10 @@ type Postgresql struct {
|
|||
// PostgresSpec defines the specification for the PostgreSQL TPR.
|
||||
type PostgresSpec struct {
|
||||
PostgresqlParam `json:"postgresql"`
|
||||
Volume `json:"volume,omitempty"`
|
||||
Patroni `json:"patroni,omitempty"`
|
||||
*Resources `json:"resources,omitempty"`
|
||||
Volume `json:"volume"`
|
||||
// +optional
|
||||
Patroni `json:"patroni"`
|
||||
*Resources `json:"resources,omitempty"`
|
||||
|
||||
EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"`
|
||||
EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"`
|
||||
|
|
@ -52,20 +65,33 @@ type PostgresSpec struct {
|
|||
|
||||
// deprecated load balancer settings maintained for backward compatibility
|
||||
// see "Load balancers" operator docs
|
||||
UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`
|
||||
UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`
|
||||
// deprecated
|
||||
ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"`
|
||||
|
||||
// load balancers' source ranges are the same for master and replica services
|
||||
// +nullable
|
||||
// +kubebuilder:validation:items:Pattern=`^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$`
|
||||
// +optional
|
||||
AllowedSourceRanges []string `json:"allowedSourceRanges"`
|
||||
|
||||
Users map[string]UserFlags `json:"users,omitempty"`
|
||||
UsersIgnoringSecretRotation []string `json:"usersIgnoringSecretRotation,omitempty"`
|
||||
UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"`
|
||||
UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"`
|
||||
Users map[string]UserFlags `json:"users,omitempty"`
|
||||
// +nullable
|
||||
UsersIgnoringSecretRotation []string `json:"usersIgnoringSecretRotation,omitempty"`
|
||||
// +nullable
|
||||
UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"`
|
||||
// +nullable
|
||||
UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"`
|
||||
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone *CloneDescription `json:"clone,omitempty"`
|
||||
// +kubebuilder:validation:Minimum=0
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
// +kubebuilder:validation:Schemaless
|
||||
// +kubebuilder:validation:Type=array
|
||||
// +kubebuilde:validation:items:Type=string
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone *CloneDescription `json:"clone,omitempty"`
|
||||
// Note: usernames specified here as database owners must be declared
|
||||
// in the users key of the spec key.
|
||||
Databases map[string]string `json:"databases,omitempty"`
|
||||
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
|
||||
SchedulerName *string `json:"schedulerName,omitempty"`
|
||||
|
|
@ -77,10 +103,11 @@ type PostgresSpec struct {
|
|||
ShmVolume *bool `json:"enableShmVolume,omitempty"`
|
||||
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
|
||||
LogicalBackupRetention string `json:"logicalBackupRetention,omitempty"`
|
||||
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
|
||||
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
|
||||
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
|
||||
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$`
|
||||
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
|
||||
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
|
||||
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
|
||||
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
|
||||
// MasterServiceAnnotations takes precedence over ServiceAnnotations for master role if not empty
|
||||
MasterServiceAnnotations map[string]string `json:"masterServiceAnnotations,omitempty"`
|
||||
// ReplicaServiceAnnotations takes precedence over ServiceAnnotations for replica role if not empty
|
||||
|
|
@ -90,9 +117,10 @@ type PostgresSpec struct {
|
|||
Streams []Stream `json:"streams,omitempty"`
|
||||
Env []v1.EnvVar `json:"env,omitempty"`
|
||||
|
||||
// deprecated json tags
|
||||
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
|
||||
PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"`
|
||||
// deprecated
|
||||
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
|
||||
// deprecated
|
||||
PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
|
@ -123,50 +151,84 @@ type PreparedSchema struct {
|
|||
type MaintenanceWindow struct {
|
||||
Everyday bool `json:"everyday,omitempty"`
|
||||
Weekday time.Weekday `json:"weekday,omitempty"`
|
||||
StartTime metav1.Time `json:"startTime,omitempty"`
|
||||
EndTime metav1.Time `json:"endTime,omitempty"`
|
||||
StartTime metav1.Time `json:"startTime"`
|
||||
EndTime metav1.Time `json:"endTime"`
|
||||
}
|
||||
|
||||
// Volume describes a single volume in the manifest.
|
||||
type Volume struct {
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty"`
|
||||
Size string `json:"size"`
|
||||
StorageClass string `json:"storageClass,omitempty"`
|
||||
SubPath string `json:"subPath,omitempty"`
|
||||
IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"`
|
||||
Iops *int64 `json:"iops,omitempty"`
|
||||
Throughput *int64 `json:"throughput,omitempty"`
|
||||
VolumeType string `json:"type,omitempty"`
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
|
||||
Size string `json:"size"`
|
||||
StorageClass string `json:"storageClass,omitempty"`
|
||||
SubPath string `json:"subPath,omitempty"`
|
||||
IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"`
|
||||
Iops *int64 `json:"iops,omitempty"`
|
||||
Throughput *int64 `json:"throughput,omitempty"`
|
||||
VolumeType string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// AdditionalVolume specs additional optional volumes for statefulset
|
||||
type AdditionalVolume struct {
|
||||
Name string `json:"name"`
|
||||
MountPath string `json:"mountPath"`
|
||||
SubPath string `json:"subPath,omitempty"`
|
||||
IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"`
|
||||
TargetContainers []string `json:"targetContainers"`
|
||||
VolumeSource v1.VolumeSource `json:"volumeSource"`
|
||||
Name string `json:"name"`
|
||||
MountPath string `json:"mountPath"`
|
||||
SubPath string `json:"subPath,omitempty"`
|
||||
IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"`
|
||||
// +nullable
|
||||
// +optional
|
||||
TargetContainers []string `json:"targetContainers"`
|
||||
// +kubebuilder:validation:XPreserveUnknownFields
|
||||
// +kubebuilder:validation:Type=object
|
||||
// +kubebuilder:validation:Schemaless
|
||||
VolumeSource v1.VolumeSource `json:"volumeSource"`
|
||||
}
|
||||
|
||||
// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
|
||||
type PostgresqlParam struct {
|
||||
// +kubebuilder:validation:Enum="14";"15";"16";"17";"18"
|
||||
PgVersion string `json:"version"`
|
||||
Parameters map[string]string `json:"parameters,omitempty"`
|
||||
}
|
||||
|
||||
// ResourceDescription describes CPU and memory resources defined for a cluster.
|
||||
type ResourceDescription struct {
|
||||
CPU *string `json:"cpu,omitempty"`
|
||||
Memory *string `json:"memory,omitempty"`
|
||||
// Decimal natural followed by m, or decimal natural followed by
|
||||
// dot followed by up to three decimal digits.
|
||||
//
|
||||
// This is because the Kubernetes CPU resource has millis as the
|
||||
// maximum precision. The actual values are checked in code
|
||||
// because the regular expression would be huge and horrible and
|
||||
// not very helpful in validation error messages; this one checks
|
||||
// only the format of the given number.
|
||||
//
|
||||
// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
|
||||
//
|
||||
// Note: the value specified here must not be zero or be lower
|
||||
// than the corresponding request.
|
||||
// +kubebuilder:validation:Pattern=`^(\d+m|\d+(\.\d{1,3})?)$`
|
||||
CPU *string `json:"cpu,omitempty"`
|
||||
// You can express memory as a plain integer or as a fixed-point
|
||||
// integer using one of these suffixes: E, P, T, G, M, k. You can
|
||||
// also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki
|
||||
//
|
||||
// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory
|
||||
//
|
||||
// Note: the value specified here must not be zero or be higher
|
||||
// than the corresponding limit.
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
|
||||
Memory *string `json:"memory,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
|
||||
HugePages2Mi *string `json:"hugepages-2Mi,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
|
||||
HugePages1Gi *string `json:"hugepages-1Gi,omitempty"`
|
||||
}
|
||||
|
||||
// Resources describes requests and limits for the cluster resouces.
|
||||
type Resources struct {
|
||||
ResourceRequests ResourceDescription `json:"requests,omitempty"`
|
||||
ResourceLimits ResourceDescription `json:"limits,omitempty"`
|
||||
// +optional
|
||||
ResourceRequests ResourceDescription `json:"requests"`
|
||||
// +optional
|
||||
ResourceLimits ResourceDescription `json:"limits"`
|
||||
}
|
||||
|
||||
// Patroni contains Patroni-specific configuration
|
||||
|
|
@ -176,7 +238,7 @@ type Patroni struct {
|
|||
TTL uint32 `json:"ttl,omitempty"`
|
||||
LoopWait uint32 `json:"loop_wait,omitempty"`
|
||||
RetryTimeout uint32 `json:"retry_timeout,omitempty"`
|
||||
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
|
||||
MaximumLagOnFailover int64 `json:"maximum_lag_on_failover,omitempty"`
|
||||
Slots map[string]map[string]string `json:"slots,omitempty"`
|
||||
SynchronousMode bool `json:"synchronous_mode,omitempty"`
|
||||
SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"`
|
||||
|
|
@ -184,16 +246,20 @@ type Patroni struct {
|
|||
FailsafeMode *bool `json:"failsafe_mode,omitempty"`
|
||||
}
|
||||
|
||||
// StandbyDescription contains remote primary config or s3/gs wal path
|
||||
// StandbyDescription contains remote primary config and/or s3/gs wal path.
|
||||
// standby_host can be specified alone or together with either s3_wal_path OR gs_wal_path (mutually exclusive).
|
||||
// At least one field must be specified. s3_wal_path and gs_wal_path are mutually exclusive.
|
||||
type StandbyDescription struct {
|
||||
S3WalPath string `json:"s3_wal_path,omitempty"`
|
||||
GSWalPath string `json:"gs_wal_path,omitempty"`
|
||||
StandbyHost string `json:"standby_host,omitempty"`
|
||||
StandbyPort string `json:"standby_port,omitempty"`
|
||||
S3WalPath string `json:"s3_wal_path,omitempty"`
|
||||
GSWalPath string `json:"gs_wal_path,omitempty"`
|
||||
StandbyHost string `json:"standby_host,omitempty"`
|
||||
StandbyPort string `json:"standby_port,omitempty"`
|
||||
StandbyPrimarySlotName string `json:"standby_primary_slot_name,omitempty"`
|
||||
}
|
||||
|
||||
// TLSDescription specs TLS properties
|
||||
type TLSDescription struct {
|
||||
// +required
|
||||
SecretName string `json:"secretName,omitempty"`
|
||||
CertificateFile string `json:"certificateFile,omitempty"`
|
||||
PrivateKeyFile string `json:"privateKeyFile,omitempty"`
|
||||
|
|
@ -203,8 +269,14 @@ type TLSDescription struct {
|
|||
|
||||
// CloneDescription describes which cluster the new should clone and up to which point in time
|
||||
type CloneDescription struct {
|
||||
ClusterName string `json:"cluster,omitempty"`
|
||||
UID string `json:"uid,omitempty"`
|
||||
// +required
|
||||
ClusterName string `json:"cluster,omitempty"`
|
||||
// +kubebuilder:validation:Format=uuid
|
||||
UID string `json:"uid,omitempty"`
|
||||
// The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC
|
||||
// Example: 1996-12-19T16:39:57-08:00
|
||||
// Note: this field requires a timezone
|
||||
// +kubebuilder:validation:Pattern=`^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$`
|
||||
EndTimestamp string `json:"timestamp,omitempty"`
|
||||
S3WalPath string `json:"s3_wal_path,omitempty"`
|
||||
S3Endpoint string `json:"s3_endpoint,omitempty"`
|
||||
|
|
@ -224,6 +296,8 @@ type Sidecar struct {
|
|||
}
|
||||
|
||||
// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
|
||||
// +kubebuilder:validation:items:Enum=bypassrls;BYPASSRLS;nobypassrls;NOBYPASSRLS;createdb;CREATEDB;nocreatedb;NOCREATEDB;createrole;CREATEROLE;nocreaterole;NOCREATEROLE;inherit;INHERIT;noinherit;NOINHERIT;login;LOGIN;nologin;NOLOGIN;replication;REPLICATION;noreplication;NOREPLICATION;superuser;SUPERUSER;nosuperuser;NOSUPERUSER
|
||||
// +nullable
|
||||
type UserFlags []string
|
||||
|
||||
// PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.)
|
||||
|
|
@ -242,26 +316,30 @@ type PostgresStatus struct {
|
|||
// makes sense to expose. E.g. pool size (min/max boundaries), max client
|
||||
// connections etc.
|
||||
type ConnectionPooler struct {
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
|
||||
Schema string `json:"schema,omitempty"`
|
||||
User string `json:"user,omitempty"`
|
||||
Mode string `json:"mode,omitempty"`
|
||||
DockerImage string `json:"dockerImage,omitempty"`
|
||||
MaxDBConnections *int32 `json:"maxDBConnections,omitempty"`
|
||||
// +kubebuilder:validation:Enum=session;transaction
|
||||
Mode string `json:"mode,omitempty"`
|
||||
DockerImage string `json:"dockerImage,omitempty"`
|
||||
MaxDBConnections *int32 `json:"maxDBConnections,omitempty"`
|
||||
|
||||
*Resources `json:"resources,omitempty"`
|
||||
}
|
||||
|
||||
// Stream defines properties for creating FabricEventStream resources
|
||||
type Stream struct {
|
||||
ApplicationId string `json:"applicationId"`
|
||||
Database string `json:"database"`
|
||||
Tables map[string]StreamTable `json:"tables"`
|
||||
Filter map[string]*string `json:"filter,omitempty"`
|
||||
BatchSize *uint32 `json:"batchSize,omitempty"`
|
||||
CPU *string `json:"cpu,omitempty"`
|
||||
Memory *string `json:"memory,omitempty"`
|
||||
EnableRecovery *bool `json:"enableRecovery,omitempty"`
|
||||
ApplicationId string `json:"applicationId"`
|
||||
Database string `json:"database"`
|
||||
Tables map[string]StreamTable `json:"tables"`
|
||||
Filter map[string]*string `json:"filter,omitempty"`
|
||||
BatchSize *uint32 `json:"batchSize,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+m|\d+(\.\d{1,3})?)$`
|
||||
CPU *string `json:"cpu,omitempty"`
|
||||
// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
|
||||
Memory *string `json:"memory,omitempty"`
|
||||
EnableRecovery *bool `json:"enableRecovery,omitempty"`
|
||||
}
|
||||
|
||||
// StreamTable defines properties of outbox tables for FabricEventStreams
|
||||
|
|
|
|||
|
|
@ -91,6 +91,13 @@ var maintenanceWindows = []struct {
|
|||
StartTime: mustParseTime("10:00"),
|
||||
EndTime: mustParseTime("20:00"),
|
||||
}, nil},
|
||||
{"regular every day scenario",
|
||||
[]byte(`"05:00-07:00"`),
|
||||
MaintenanceWindow{
|
||||
Everyday: true,
|
||||
StartTime: mustParseTime("05:00"),
|
||||
EndTime: mustParseTime("07:00"),
|
||||
}, nil},
|
||||
{"starts and ends at the same time",
|
||||
[]byte(`"Mon:10:00-10:00"`),
|
||||
MaintenanceWindow{
|
||||
|
|
@ -219,7 +226,7 @@ var unmarshalCluster = []struct {
|
|||
"127.0.0.1/32"
|
||||
],
|
||||
"postgresql": {
|
||||
"version": "17",
|
||||
"version": "18",
|
||||
"parameters": {
|
||||
"shared_buffers": "32MB",
|
||||
"max_connections": "10",
|
||||
|
|
@ -279,7 +286,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
Spec: PostgresSpec{
|
||||
PostgresqlParam: PostgresqlParam{
|
||||
PgVersion: "17",
|
||||
PgVersion: "18",
|
||||
Parameters: map[string]string{
|
||||
"shared_buffers": "32MB",
|
||||
"max_connections": "10",
|
||||
|
|
@ -339,7 +346,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
Error: "",
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"18","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "example with clone",
|
||||
|
|
@ -404,7 +411,7 @@ var postgresqlList = []struct {
|
|||
out PostgresqlList
|
||||
err error
|
||||
}{
|
||||
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
|
||||
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"18"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
|
||||
PostgresqlList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "List",
|
||||
|
|
@ -425,7 +432,7 @@ var postgresqlList = []struct {
|
|||
},
|
||||
Spec: PostgresSpec{
|
||||
ClusterName: "testcluster42",
|
||||
PostgresqlParam: PostgresqlParam{PgVersion: "17"},
|
||||
PostgresqlParam: PostgresqlParam{PgVersion: "18"},
|
||||
Volume: Volume{Size: "10Gi"},
|
||||
TeamID: "acid",
|
||||
AllowedSourceRanges: []string{"185.85.220.0/22"},
|
||||
|
|
@ -787,8 +794,6 @@ func TestPostgresListMeta(t *testing.T) {
|
|||
if a := tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) {
|
||||
t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ListMeta, a)
|
||||
}
|
||||
|
||||
return
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -433,6 +433,18 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.EnableMaintenanceWindows != nil {
|
||||
in, out := &in.EnableMaintenanceWindows, &out.EnableMaintenanceWindows
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaintenanceWindows != nil {
|
||||
in, out := &in.MaintenanceWindows, &out.MaintenanceWindows
|
||||
*out = make([]MaintenanceWindow, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ShmVolume != nil {
|
||||
in, out := &in.ShmVolume, &out.ShmVolume
|
||||
*out = new(bool)
|
||||
|
|
@ -975,14 +987,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
|
|||
*out = *in
|
||||
if in.AdditionalSuperuserTeams != nil {
|
||||
in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams
|
||||
*out = make(map[string][]string, len(*in))
|
||||
*out = make(map[string]SuperUserTeams, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
*out = make(SuperUserTeams, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
|
|
@ -990,14 +1002,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
|
|||
}
|
||||
if in.AdditionalTeams != nil {
|
||||
in, out := &in.AdditionalTeams, &out.AdditionalTeams
|
||||
*out = make(map[string][]string, len(*in))
|
||||
*out = make(map[string]Teams, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
*out = make(Teams, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
|
|
@ -1005,14 +1017,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
|
|||
}
|
||||
if in.AdditionalMembers != nil {
|
||||
in, out := &in.AdditionalMembers, &out.AdditionalMembers
|
||||
*out = make(map[string][]string, len(*in))
|
||||
*out = make(map[string]Users, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make([]string, len(*in))
|
||||
*out = make(Users, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
|
|
@ -1400,6 +1412,26 @@ func (in *StreamTable) DeepCopy() *StreamTable {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in SuperUserTeams) DeepCopyInto(out *SuperUserTeams) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(SuperUserTeams, len(*in))
|
||||
copy(*out, *in)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuperUserTeams.
|
||||
func (in SuperUserTeams) DeepCopy() SuperUserTeams {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SuperUserTeams)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TLSDescription) DeepCopyInto(out *TLSDescription) {
|
||||
*out = *in
|
||||
|
|
@ -1416,6 +1448,26 @@ func (in *TLSDescription) DeepCopy() *TLSDescription {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in Teams) DeepCopyInto(out *Teams) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(Teams, len(*in))
|
||||
copy(*out, *in)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Teams.
|
||||
func (in Teams) DeepCopy() Teams {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Teams)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
|
||||
*out = *in
|
||||
|
|
@ -1469,6 +1521,26 @@ func (in UserFlags) DeepCopy() UserFlags {
|
|||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in Users) DeepCopyInto(out *Users) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(Users, len(*in))
|
||||
copy(*out, *in)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Users.
|
||||
func (in Users) DeepCopy() Users {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Users)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Volume) DeepCopyInto(out *Volume) {
|
||||
*out = *in
|
||||
|
|
|
|||
|
|
@ -9,14 +9,16 @@ import (
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// FabricEventStream defines FabricEventStream Custom Resource Definition Object.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type FabricEventStream struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec FabricEventStreamSpec `json:"spec"`
|
||||
}
|
||||
|
||||
// FabricEventStreamSpec defines the specification for the FabricEventStream TPR.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type FabricEventStreamSpec struct {
|
||||
ApplicationId string `json:"applicationId"`
|
||||
EventStreams []EventStream `json:"eventStreams"`
|
||||
|
|
@ -25,6 +27,7 @@ type FabricEventStreamSpec struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// FabricEventStreamList defines a list of FabricEventStreams .
|
||||
// +k8s:deepcopy-gen=true
|
||||
type FabricEventStreamList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
|
@ -33,6 +36,7 @@ type FabricEventStreamList struct {
|
|||
}
|
||||
|
||||
// EventStream defines the source, flow and sink of the event stream
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStream struct {
|
||||
EventStreamFlow EventStreamFlow `json:"flow"`
|
||||
EventStreamSink EventStreamSink `json:"sink"`
|
||||
|
|
@ -41,12 +45,14 @@ type EventStream struct {
|
|||
}
|
||||
|
||||
// EventStreamFlow defines the flow characteristics of the event stream
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStreamFlow struct {
|
||||
Type string `json:"type"`
|
||||
PayloadColumn *string `json:"payloadColumn,omitempty"`
|
||||
}
|
||||
|
||||
// EventStreamSink defines the target of the event stream
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStreamSink struct {
|
||||
Type string `json:"type"`
|
||||
EventType string `json:"eventType,omitempty"`
|
||||
|
|
@ -54,12 +60,14 @@ type EventStreamSink struct {
|
|||
}
|
||||
|
||||
// EventStreamRecovery defines the target of dead letter queue
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStreamRecovery struct {
|
||||
Type string `json:"type"`
|
||||
Sink *EventStreamSink `json:"sink"`
|
||||
}
|
||||
|
||||
// EventStreamSource defines the source of the event stream and connection for FES operator
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStreamSource struct {
|
||||
Type string `json:"type"`
|
||||
Schema string `json:"schema,omitempty" defaults:"public"`
|
||||
|
|
@ -69,12 +77,14 @@ type EventStreamSource struct {
|
|||
}
|
||||
|
||||
// EventStreamTable defines the name and ID column to be used for streaming
|
||||
// +k8s:deepcopy-gen=true
|
||||
type EventStreamTable struct {
|
||||
Name string `json:"name"`
|
||||
IDColumn *string `json:"idColumn,omitempty"`
|
||||
}
|
||||
|
||||
// Connection to be used for allowing the FES operator to connect to a database
|
||||
// +k8s:deepcopy-gen=true
|
||||
type Connection struct {
|
||||
Url string `json:"jdbcUrl"`
|
||||
SlotName string `json:"slotName"`
|
||||
|
|
@ -84,6 +94,7 @@ type Connection struct {
|
|||
}
|
||||
|
||||
// DBAuth specifies the credentials to be used for connecting with the database
|
||||
// +k8s:deepcopy-gen=true
|
||||
type DBAuth struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name,omitempty"`
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2021 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -38,11 +39,11 @@ func (in *Connection) DeepCopyInto(out *Connection) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
in.DBAuth.DeepCopyInto(&out.DBAuth)
|
||||
out.DBAuth = in.DBAuth
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
|
||||
func (in *Connection) DeepCopy() *Connection {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -58,7 +59,7 @@ func (in *DBAuth) DeepCopyInto(out *DBAuth) {
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBAuth.
|
||||
func (in *DBAuth) DeepCopy() *DBAuth {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -72,13 +73,13 @@ func (in *DBAuth) DeepCopy() *DBAuth {
|
|||
func (in *EventStream) DeepCopyInto(out *EventStream) {
|
||||
*out = *in
|
||||
in.EventStreamFlow.DeepCopyInto(&out.EventStreamFlow)
|
||||
in.EventStreamRecovery.DeepCopyInto(&out.EventStreamRecovery)
|
||||
in.EventStreamSink.DeepCopyInto(&out.EventStreamSink)
|
||||
in.EventStreamSource.DeepCopyInto(&out.EventStreamSource)
|
||||
in.EventStreamRecovery.DeepCopyInto(&out.EventStreamRecovery)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStream.
|
||||
func (in *EventStream) DeepCopy() *EventStream {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -99,7 +100,7 @@ func (in *EventStreamFlow) DeepCopyInto(out *EventStreamFlow) {
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamFlow.
|
||||
func (in *EventStreamFlow) DeepCopy() *EventStreamFlow {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -120,7 +121,7 @@ func (in *EventStreamRecovery) DeepCopyInto(out *EventStreamRecovery) {
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamRecovery.
|
||||
func (in *EventStreamRecovery) DeepCopy() *EventStreamRecovery {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -141,7 +142,7 @@ func (in *EventStreamSink) DeepCopyInto(out *EventStreamSink) {
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamSink.
|
||||
func (in *EventStreamSink) DeepCopy() *EventStreamSink {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -154,17 +155,17 @@ func (in *EventStreamSink) DeepCopy() *EventStreamSink {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EventStreamSource) DeepCopyInto(out *EventStreamSource) {
|
||||
*out = *in
|
||||
in.Connection.DeepCopyInto(&out.Connection)
|
||||
in.EventStreamTable.DeepCopyInto(&out.EventStreamTable)
|
||||
if in.Filter != nil {
|
||||
in, out := &in.Filter, &out.Filter
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
in.EventStreamTable.DeepCopyInto(&out.EventStreamTable)
|
||||
in.Connection.DeepCopyInto(&out.Connection)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamSource.
|
||||
func (in *EventStreamSource) DeepCopy() *EventStreamSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -185,7 +186,7 @@ func (in *EventStreamTable) DeepCopyInto(out *EventStreamTable) {
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamTable.
|
||||
func (in *EventStreamTable) DeepCopy() *EventStreamTable {
|
||||
if in == nil {
|
||||
return nil
|
||||
|
|
@ -195,30 +196,6 @@ func (in *EventStreamTable) DeepCopy() *EventStreamTable {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FabricEventStreamSpec) DeepCopyInto(out *FabricEventStreamSpec) {
|
||||
*out = *in
|
||||
if in.EventStreams != nil {
|
||||
in, out := &in.EventStreams, &out.EventStreams
|
||||
*out = make([]EventStream, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricEventStreamSpec.
|
||||
func (in *FabricEventStreamSpec) DeepCopy() *FabricEventStreamSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FabricEventStreamSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FabricEventStream) DeepCopyInto(out *FabricEventStream) {
|
||||
*out = *in
|
||||
|
|
@ -278,3 +255,26 @@ func (in *FabricEventStreamList) DeepCopyObject() runtime.Object {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FabricEventStreamSpec) DeepCopyInto(out *FabricEventStreamSpec) {
|
||||
*out = *in
|
||||
if in.EventStreams != nil {
|
||||
in, out := &in.EventStreams, &out.EventStreams
|
||||
*out = make([]EventStream, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricEventStreamSpec.
|
||||
func (in *FabricEventStreamSpec) DeepCopy() *FabricEventStreamSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FabricEventStreamSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,6 +32,7 @@ import (
|
|||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/rest"
|
||||
|
|
@ -271,25 +272,29 @@ func (c *Cluster) Create() (err error) {
|
|||
)
|
||||
|
||||
defer func() {
|
||||
var (
|
||||
pgUpdatedStatus *acidv1.Postgresql
|
||||
errStatus error
|
||||
)
|
||||
if err == nil {
|
||||
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
|
||||
} else {
|
||||
currentStatus := c.Status.DeepCopy()
|
||||
pg := c.Postgresql.DeepCopy()
|
||||
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
|
||||
|
||||
if err != nil {
|
||||
c.logger.Warningf("cluster created failed: %v", err)
|
||||
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
|
||||
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
|
||||
}
|
||||
if errStatus != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", errStatus)
|
||||
}
|
||||
if pgUpdatedStatus != nil {
|
||||
|
||||
if !equality.Semantic.DeepEqual(currentStatus, pg.Status) {
|
||||
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", err)
|
||||
return
|
||||
}
|
||||
c.setSpec(pgUpdatedStatus)
|
||||
}
|
||||
}()
|
||||
|
||||
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
|
||||
pg := c.Postgresql.DeepCopy()
|
||||
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusCreating
|
||||
|
||||
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not set cluster status: %v", err)
|
||||
}
|
||||
|
|
@ -380,7 +385,7 @@ func (c *Cluster) Create() (err error) {
|
|||
|
||||
// create database objects unless we are running without pods or disabled
|
||||
// that feature explicitly
|
||||
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
|
||||
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || isStandbyCluster(&c.Spec)) {
|
||||
c.logger.Infof("Create roles")
|
||||
if err = c.createRoles(); err != nil {
|
||||
return fmt.Errorf("could not create users: %v", err)
|
||||
|
|
@ -841,6 +846,14 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
|
|||
return false, "new service's owner references do not match the current ones"
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(old.Spec.Selector, new.Spec.Selector) {
|
||||
return false, "new service's selector does not match the current one"
|
||||
}
|
||||
|
||||
if old.Spec.ExternalTrafficPolicy != new.Spec.ExternalTrafficPolicy {
|
||||
return false, "new service's ExternalTrafficPolicy does not match the current one"
|
||||
}
|
||||
|
||||
return true, ""
|
||||
}
|
||||
|
||||
|
|
@ -969,28 +982,33 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
|
|||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)
|
||||
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdating
|
||||
|
||||
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
|
||||
newSpec, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not set cluster status to updating: %w", err)
|
||||
}
|
||||
|
||||
if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
|
||||
// do not apply any major version related changes yet
|
||||
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
|
||||
}
|
||||
c.setSpec(newSpec)
|
||||
|
||||
defer func() {
|
||||
var (
|
||||
pgUpdatedStatus *acidv1.Postgresql
|
||||
err error
|
||||
)
|
||||
currentStatus := newSpec.Status.DeepCopy()
|
||||
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
|
||||
|
||||
if updateFailed {
|
||||
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
|
||||
} else {
|
||||
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
|
||||
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
|
||||
}
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", err)
|
||||
}
|
||||
if pgUpdatedStatus != nil {
|
||||
|
||||
if !equality.Semantic.DeepEqual(currentStatus, newSpec.Status) {
|
||||
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", err)
|
||||
return
|
||||
}
|
||||
c.setSpec(pgUpdatedStatus)
|
||||
}
|
||||
}()
|
||||
|
|
@ -1759,10 +1777,27 @@ func (c *Cluster) GetStatus() *ClusterStatus {
|
|||
}
|
||||
|
||||
func (c *Cluster) GetSwitchoverSchedule() string {
|
||||
now := time.Now().UTC()
|
||||
return c.getSwitchoverScheduleAtTime(now)
|
||||
}
|
||||
|
||||
func (c *Cluster) getSwitchoverScheduleAtTime(now time.Time) string {
|
||||
var possibleSwitchover, schedule time.Time
|
||||
|
||||
now := time.Now().UTC()
|
||||
for _, window := range c.Spec.MaintenanceWindows {
|
||||
maintenanceWindows := c.Spec.MaintenanceWindows
|
||||
if len(maintenanceWindows) == 0 {
|
||||
maintenanceWindows = make([]acidv1.MaintenanceWindow, 0, len(c.OpConfig.MaintenanceWindows))
|
||||
for _, windowStr := range c.OpConfig.MaintenanceWindows {
|
||||
var window acidv1.MaintenanceWindow
|
||||
if err := window.UnmarshalJSON([]byte(windowStr)); err != nil {
|
||||
c.logger.Errorf("could not parse default maintenance window %q: %v", windowStr, err)
|
||||
continue
|
||||
}
|
||||
maintenanceWindows = append(maintenanceWindows, window)
|
||||
}
|
||||
}
|
||||
|
||||
for _, window := range maintenanceWindows {
|
||||
// in the best case it is possible today
|
||||
possibleSwitchover = time.Date(now.Year(), now.Month(), now.Day(), window.StartTime.Hour(), window.StartTime.Minute(), 0, 0, time.UTC)
|
||||
if window.Everyday {
|
||||
|
|
@ -1780,10 +1815,15 @@ func (c *Cluster) GetSwitchoverSchedule() string {
|
|||
}
|
||||
}
|
||||
|
||||
if (schedule == time.Time{}) || possibleSwitchover.Before(schedule) {
|
||||
if (schedule.Equal(time.Time{})) || possibleSwitchover.Before(schedule) {
|
||||
schedule = possibleSwitchover
|
||||
}
|
||||
}
|
||||
|
||||
if schedule.IsZero() {
|
||||
return ""
|
||||
}
|
||||
|
||||
return schedule.Format("2006-01-02T15:04+00")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -680,8 +680,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -702,8 +701,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -714,8 +712,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
|
|
@ -737,8 +734,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
|
|
@ -780,8 +776,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -792,8 +787,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
serviceAnnotations: make(map[string]string),
|
||||
operatorAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -835,8 +829,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -857,8 +850,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -869,8 +861,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
|
|
@ -892,8 +883,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
operatorAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
|
|
@ -935,8 +925,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -947,8 +936,7 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
serviceAnnotations: make(map[string]string),
|
||||
operatorAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -1341,14 +1329,21 @@ func TestCompareEnv(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
|
||||
func newService(
|
||||
annotations map[string]string,
|
||||
svcType v1.ServiceType,
|
||||
sourceRanges []string,
|
||||
selector map[string]string,
|
||||
policy v1.ServiceExternalTrafficPolicyType) *v1.Service {
|
||||
svc := &v1.Service{
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: svcT,
|
||||
LoadBalancerSourceRanges: lbSr,
|
||||
Selector: selector,
|
||||
Type: svcType,
|
||||
LoadBalancerSourceRanges: sourceRanges,
|
||||
ExternalTrafficPolicy: policy,
|
||||
},
|
||||
}
|
||||
svc.Annotations = ann
|
||||
svc.Annotations = annotations
|
||||
return svc
|
||||
}
|
||||
|
||||
|
|
@ -1365,13 +1360,17 @@ func TestCompareServices(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
defaultPolicy := v1.ServiceExternalTrafficPolicyTypeCluster
|
||||
|
||||
serviceWithOwnerReference := newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"})
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil,
|
||||
defaultPolicy,
|
||||
)
|
||||
|
||||
ownerRef := metav1.OwnerReference{
|
||||
APIVersion: "acid.zalan.do/v1",
|
||||
|
|
@ -1394,17 +1393,17 @@ func TestCompareServices(t *testing.T) {
|
|||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
|
|
@ -1412,17 +1411,17 @@ func TestCompareServices(t *testing.T) {
|
|||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
match: false,
|
||||
reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
|
||||
},
|
||||
|
|
@ -1431,17 +1430,17 @@ func TestCompareServices(t *testing.T) {
|
|||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"185.249.56.0/22"}),
|
||||
[]string{"185.249.56.0/22"},
|
||||
nil, defaultPolicy),
|
||||
match: false,
|
||||
reason: `new service's LoadBalancerSourceRange does not match the current one`,
|
||||
},
|
||||
|
|
@ -1450,17 +1449,17 @@ func TestCompareServices(t *testing.T) {
|
|||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{}),
|
||||
[]string{},
|
||||
nil, defaultPolicy),
|
||||
match: false,
|
||||
reason: `new service's LoadBalancerSourceRange does not match the current one`,
|
||||
},
|
||||
|
|
@ -1469,13 +1468,41 @@ func TestCompareServices(t *testing.T) {
|
|||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"},
|
||||
nil, defaultPolicy),
|
||||
new: serviceWithOwnerReference,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
about: "new service has a label selector",
|
||||
current: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{},
|
||||
map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy),
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
about: "services differ on external traffic policy",
|
||||
current: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{},
|
||||
nil, defaultPolicy),
|
||||
new: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{},
|
||||
nil, v1.ServiceExternalTrafficPolicyTypeLocal),
|
||||
match: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
|
@ -2067,7 +2094,7 @@ func TestCompareVolumeMounts(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetSwitchoverSchedule(t *testing.T) {
|
||||
now := time.Now()
|
||||
now, _ := time.Parse(time.RFC3339, "2025-11-11T12:35:00Z")
|
||||
|
||||
futureTimeStart := now.Add(1 * time.Hour)
|
||||
futureWindowTimeStart := futureTimeStart.Format("15:04")
|
||||
|
|
@ -2076,10 +2103,13 @@ func TestGetSwitchoverSchedule(t *testing.T) {
|
|||
pastWindowTimeStart := pastTimeStart.Format("15:04")
|
||||
pastWindowTimeEnd := now.Add(-1 * time.Hour).Format("15:04")
|
||||
|
||||
defaultWindowStr := fmt.Sprintf("%s-%s", futureWindowTimeStart, futureWindowTimeEnd)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
windows []acidv1.MaintenanceWindow
|
||||
expected string
|
||||
name string
|
||||
windows []acidv1.MaintenanceWindow
|
||||
defaultWindows []string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "everyday maintenance windows is later today",
|
||||
|
|
@ -2141,12 +2171,41 @@ func TestGetSwitchoverSchedule(t *testing.T) {
|
|||
},
|
||||
expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
|
||||
},
|
||||
{
|
||||
name: "fallback to operator default window when spec is empty",
|
||||
windows: []acidv1.MaintenanceWindow{},
|
||||
defaultWindows: []string{defaultWindowStr},
|
||||
expected: futureTimeStart.Format("2006-01-02T15:04+00"),
|
||||
},
|
||||
{
|
||||
name: "no windows defined returns empty string",
|
||||
windows: []acidv1.MaintenanceWindow{},
|
||||
defaultWindows: nil,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "choose the earliest window from multiple in spec",
|
||||
windows: []acidv1.MaintenanceWindow{
|
||||
{
|
||||
Weekday: now.AddDate(0, 0, 2).Weekday(),
|
||||
StartTime: mustParseTime(futureWindowTimeStart),
|
||||
EndTime: mustParseTime(futureWindowTimeEnd),
|
||||
},
|
||||
{
|
||||
Weekday: now.AddDate(0, 0, 1).Weekday(),
|
||||
StartTime: mustParseTime(pastWindowTimeStart),
|
||||
EndTime: mustParseTime(pastWindowTimeEnd),
|
||||
},
|
||||
},
|
||||
expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cluster.Spec.MaintenanceWindows = tt.windows
|
||||
schedule := cluster.GetSwitchoverSchedule()
|
||||
cluster.OpConfig.MaintenanceWindows = tt.defaultWindows
|
||||
schedule := cluster.getSwitchoverScheduleAtTime(now)
|
||||
if schedule != tt.expected {
|
||||
t.Errorf("Expected GetSwitchoverSchedule to return %s, returned: %s", tt.expected, schedule)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -533,10 +533,6 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid
|
|||
annotations := c.getCustomServiceAnnotations(role, spec)
|
||||
|
||||
if c.shouldCreateLoadBalancerForPoolerService(role, spec) {
|
||||
// set ELB Timeout annotation with default value
|
||||
if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
|
||||
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
|
||||
}
|
||||
// -repl suffix will be added by replicaDNSName
|
||||
clusterNameWithPoolerSuffix := c.connectionPoolerName(Master)
|
||||
if role == Master {
|
||||
|
|
|
|||
|
|
@ -281,9 +281,23 @@ func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string
|
|||
return extraUsers, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
|
||||
func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string) error {
|
||||
c.setProcessName("checking for rotated users to remove from the database due to configured retention")
|
||||
extraUsers, err := findUsersFromRotation(rotatedUsers, db)
|
||||
|
||||
err := c.initDbConn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not init db connection: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if c.connectionIsClosed() {
|
||||
return
|
||||
}
|
||||
if err := c.closeDbConn(); err != nil {
|
||||
c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
extraUsers, err := findUsersFromRotation(rotatedUsers, c.pgDb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error when querying for deprecated users from password rotation: %v", err)
|
||||
}
|
||||
|
|
@ -304,7 +318,7 @@ func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
|
|||
}
|
||||
if retentionDate.After(userCreationDate) {
|
||||
c.logger.Infof("dropping user %q due to configured days in password_rotation_user_retention", rotatedUser)
|
||||
if err = users.DropPgUser(rotatedUser, db); err != nil {
|
||||
if err = users.DropPgUser(rotatedUser, c.pgDb); err != nil {
|
||||
c.logger.Errorf("could not drop role %q: %v", rotatedUser, err)
|
||||
continue
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,9 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
|
@ -12,19 +14,16 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando/postgres-operator/pkg/spec"
|
||||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
|
|
@ -171,7 +170,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
|
|||
if isSmaller {
|
||||
msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased",
|
||||
cpuLimit.String(), constants.PostgresContainerName, minCPULimit)
|
||||
c.logger.Warningf(msg)
|
||||
c.logger.Warningf("%s", msg)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
|
||||
resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit)
|
||||
}
|
||||
|
|
@ -188,7 +187,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
|
|||
if isSmaller {
|
||||
msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased",
|
||||
memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit)
|
||||
c.logger.Warningf(msg)
|
||||
c.logger.Warningf("%s", msg)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
|
||||
resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit)
|
||||
}
|
||||
|
|
@ -314,6 +313,14 @@ func (c *Cluster) generateResourceRequirements(
|
|||
specLimits := acidv1.ResourceDescription{}
|
||||
result := v1.ResourceRequirements{}
|
||||
|
||||
enforceThresholds := true
|
||||
resourcesLimitAnnotationKey := c.OpConfig.IgnoreResourcesLimitsAnnotationKey
|
||||
if resourcesLimitAnnotationKey != "" {
|
||||
if value, exists := c.ObjectMeta.Annotations[resourcesLimitAnnotationKey]; exists && value == "true" {
|
||||
enforceThresholds = false
|
||||
}
|
||||
}
|
||||
|
||||
if resources != nil {
|
||||
specRequests = resources.ResourceRequests
|
||||
specLimits = resources.ResourceLimits
|
||||
|
|
@ -330,7 +337,7 @@ func (c *Cluster) generateResourceRequirements(
|
|||
}
|
||||
|
||||
// enforce minimum cpu and memory limits for Postgres containers only
|
||||
if containerName == constants.PostgresContainerName {
|
||||
if containerName == constants.PostgresContainerName && enforceThresholds {
|
||||
if err = c.enforceMinResourceLimits(&result); err != nil {
|
||||
return nil, fmt.Errorf("could not enforce minimum resource limits: %v", err)
|
||||
}
|
||||
|
|
@ -345,7 +352,7 @@ func (c *Cluster) generateResourceRequirements(
|
|||
}
|
||||
|
||||
// enforce maximum cpu and memory requests for Postgres containers only
|
||||
if containerName == constants.PostgresContainerName {
|
||||
if containerName == constants.PostgresContainerName && enforceThresholds {
|
||||
if err = c.enforceMaxResourceRequests(&result); err != nil {
|
||||
return nil, fmt.Errorf("could not enforce maximum resource requests: %v", err)
|
||||
}
|
||||
|
|
@ -413,7 +420,7 @@ PatroniInitDBParams:
|
|||
}
|
||||
|
||||
if patroni.MaximumLagOnFailover >= 0 {
|
||||
config.Bootstrap.DCS.MaximumLagOnFailover = patroni.MaximumLagOnFailover
|
||||
config.Bootstrap.DCS.MaximumLagOnFailover = float32(patroni.MaximumLagOnFailover)
|
||||
}
|
||||
if patroni.LoopWait != 0 {
|
||||
config.Bootstrap.DCS.LoopWait = patroni.LoopWait
|
||||
|
|
@ -524,13 +531,14 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinit
|
|||
},
|
||||
}
|
||||
} else {
|
||||
if c.OpConfig.NodeReadinessLabelMerge == "OR" {
|
||||
switch c.OpConfig.NodeReadinessLabelMerge {
|
||||
case "OR":
|
||||
manifestTerms := nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
manifestTerms = append(manifestTerms, nodeReadinessSelectorTerm)
|
||||
nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
|
||||
NodeSelectorTerms: manifestTerms,
|
||||
}
|
||||
} else if c.OpConfig.NodeReadinessLabelMerge == "AND" {
|
||||
case "AND":
|
||||
for i, nodeSelectorTerm := range nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
|
||||
manifestExpressions := nodeSelectorTerm.MatchExpressions
|
||||
manifestExpressions = append(manifestExpressions, matchExpressions...)
|
||||
|
|
@ -810,9 +818,6 @@ func (c *Cluster) generatePodTemplate(
|
|||
sidecarContainers []v1.Container,
|
||||
sharePgSocketWithSidecars *bool,
|
||||
tolerationsSpec *[]v1.Toleration,
|
||||
spiloRunAsUser *int64,
|
||||
spiloRunAsGroup *int64,
|
||||
spiloFSGroup *int64,
|
||||
nodeAffinity *v1.Affinity,
|
||||
schedulerName *string,
|
||||
terminateGracePeriod int64,
|
||||
|
|
@ -831,18 +836,22 @@ func (c *Cluster) generatePodTemplate(
|
|||
terminateGracePeriodSeconds := terminateGracePeriod
|
||||
containers := []v1.Container{*spiloContainer}
|
||||
containers = append(containers, sidecarContainers...)
|
||||
securityContext := v1.PodSecurityContext{}
|
||||
|
||||
if spiloRunAsUser != nil {
|
||||
securityContext.RunAsUser = spiloRunAsUser
|
||||
securityContext := v1.PodSecurityContext{
|
||||
RunAsUser: c.OpConfig.Resources.SpiloRunAsUser,
|
||||
RunAsGroup: c.OpConfig.Resources.SpiloRunAsGroup,
|
||||
FSGroup: c.OpConfig.Resources.SpiloFSGroup,
|
||||
}
|
||||
|
||||
if spiloRunAsGroup != nil {
|
||||
securityContext.RunAsGroup = spiloRunAsGroup
|
||||
if c.Spec.SpiloRunAsUser != nil {
|
||||
securityContext.RunAsUser = c.Spec.SpiloRunAsUser
|
||||
}
|
||||
|
||||
if spiloFSGroup != nil {
|
||||
securityContext.FSGroup = spiloFSGroup
|
||||
if c.Spec.SpiloRunAsGroup != nil {
|
||||
securityContext.RunAsGroup = c.Spec.SpiloRunAsGroup
|
||||
}
|
||||
|
||||
if c.Spec.SpiloFSGroup != nil {
|
||||
securityContext.FSGroup = c.Spec.SpiloFSGroup
|
||||
}
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
|
|
@ -1298,11 +1307,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
|
||||
}
|
||||
|
||||
if spec.InitContainers != nil && len(spec.InitContainers) > 0 {
|
||||
if len(spec.InitContainers) > 0 {
|
||||
if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
|
||||
c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
|
||||
}
|
||||
initContainers = spec.InitContainers
|
||||
if err := c.validateContainers(initContainers); err != nil {
|
||||
return nil, fmt.Errorf("invalid init containers: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// backward compatible check for InitContainers
|
||||
|
|
@ -1341,22 +1353,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
// pickup the docker image for the spilo container
|
||||
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
|
||||
|
||||
// determine the User, Group and FSGroup for the spilo pod
|
||||
effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
|
||||
if spec.SpiloRunAsUser != nil {
|
||||
effectiveRunAsUser = spec.SpiloRunAsUser
|
||||
}
|
||||
|
||||
effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
|
||||
if spec.SpiloRunAsGroup != nil {
|
||||
effectiveRunAsGroup = spec.SpiloRunAsGroup
|
||||
}
|
||||
|
||||
effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
|
||||
if spec.SpiloFSGroup != nil {
|
||||
effectiveFSGroup = spec.SpiloFSGroup
|
||||
}
|
||||
|
||||
volumeMounts := generateVolumeMounts(spec.Volume)
|
||||
|
||||
// configure TLS with a custom secret volume
|
||||
|
|
@ -1401,7 +1397,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
|
||||
// generate container specs for sidecars specified in the cluster manifest
|
||||
clusterSpecificSidecars := []v1.Container{}
|
||||
if spec.Sidecars != nil && len(spec.Sidecars) > 0 {
|
||||
if len(spec.Sidecars) > 0 {
|
||||
// warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars)
|
||||
if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
|
||||
c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
|
||||
|
|
@ -1455,6 +1451,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
|
||||
sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername))
|
||||
|
||||
if err := c.validateContainers(sidecarContainers); err != nil {
|
||||
return nil, fmt.Errorf("invalid sidecar containers: %v", err)
|
||||
}
|
||||
|
||||
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
|
||||
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
|
||||
|
||||
|
|
@ -1470,9 +1470,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
sidecarContainers,
|
||||
c.OpConfig.SharePgSocketWithSidecars,
|
||||
&tolerationSpec,
|
||||
effectiveRunAsUser,
|
||||
effectiveRunAsGroup,
|
||||
effectiveFSGroup,
|
||||
c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
|
||||
spec.SchedulerName,
|
||||
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
|
||||
|
|
@ -1505,11 +1502,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
updateStrategy := appsv1.StatefulSetUpdateStrategy{Type: appsv1.OnDeleteStatefulSetStrategyType}
|
||||
|
||||
var podManagementPolicy appsv1.PodManagementPolicyType
|
||||
if c.OpConfig.PodManagementPolicy == "ordered_ready" {
|
||||
switch c.OpConfig.PodManagementPolicy {
|
||||
case "ordered_ready":
|
||||
podManagementPolicy = appsv1.OrderedReadyPodManagement
|
||||
} else if c.OpConfig.PodManagementPolicy == "parallel" {
|
||||
case "parallel":
|
||||
podManagementPolicy = appsv1.ParallelPodManagement
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
|
||||
}
|
||||
|
||||
|
|
@ -1675,7 +1673,7 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
|
|||
}
|
||||
}
|
||||
|
||||
if spec.StandbyCluster != nil {
|
||||
if isStandbyCluster(spec) {
|
||||
if newcur == 1 {
|
||||
min = newcur
|
||||
max = newcur
|
||||
|
|
@ -1928,7 +1926,7 @@ func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret {
|
|||
|
||||
// if secret lives in another namespace we cannot set ownerReferences
|
||||
var ownerReferences []metav1.OwnerReference
|
||||
if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") {
|
||||
if c.Config.OpConfig.EnableCrossNamespaceSecret && c.Postgresql.ObjectMeta.Namespace != pgUser.Namespace {
|
||||
ownerReferences = nil
|
||||
} else {
|
||||
ownerReferences = c.ownerReferences()
|
||||
|
|
@ -2031,11 +2029,6 @@ func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.Pos
|
|||
if c.shouldCreateLoadBalancerForService(role, spec) {
|
||||
dnsName := c.dnsName(role)
|
||||
|
||||
// Just set ELB Timeout annotation with default value, if it does not
|
||||
// have a custom value
|
||||
if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
|
||||
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
|
||||
}
|
||||
// External DNS name annotation is not customizable
|
||||
annotations[constants.ZalandoDNSNameAnnotation] = dnsName
|
||||
}
|
||||
|
|
@ -2191,23 +2184,29 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
|
|||
Value: description.StandbyPort,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
c.logger.Info("standby cluster streaming from WAL location")
|
||||
if description.S3WalPath != "" {
|
||||
if description.StandbyPrimarySlotName != "" {
|
||||
result = append(result, v1.EnvVar{
|
||||
Name: "STANDBY_WALE_S3_PREFIX",
|
||||
Value: description.S3WalPath,
|
||||
Name: "STANDBY_PRIMARY_SLOT_NAME",
|
||||
Value: description.StandbyPrimarySlotName,
|
||||
})
|
||||
} else if description.GSWalPath != "" {
|
||||
result = append(result, v1.EnvVar{
|
||||
Name: "STANDBY_WALE_GS_PREFIX",
|
||||
Value: description.GSWalPath,
|
||||
})
|
||||
} else {
|
||||
c.logger.Error("no WAL path specified in standby section")
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// WAL archive can be specified with or without standby_host
|
||||
if description.S3WalPath != "" {
|
||||
c.logger.Info("standby cluster using S3 WAL archive")
|
||||
result = append(result, v1.EnvVar{
|
||||
Name: "STANDBY_WALE_S3_PREFIX",
|
||||
Value: description.S3WalPath,
|
||||
})
|
||||
result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
|
||||
result = append(result, v1.EnvVar{Name: "STANDBY_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
|
||||
} else if description.GSWalPath != "" {
|
||||
c.logger.Info("standby cluster using GCS WAL archive")
|
||||
result = append(result, v1.EnvVar{
|
||||
Name: "STANDBY_WALE_GS_PREFIX",
|
||||
Value: description.GSWalPath,
|
||||
})
|
||||
result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
|
||||
result = append(result, v1.EnvVar{Name: "STANDBY_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
|
||||
}
|
||||
|
|
@ -2357,9 +2356,6 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
|
|||
[]v1.Container{},
|
||||
util.False(),
|
||||
&tolerationsSpec,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil),
|
||||
nil,
|
||||
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
|
||||
|
|
@ -2591,3 +2587,15 @@ func ensurePath(file string, defaultDir string, defaultFile string) string {
|
|||
}
|
||||
return file
|
||||
}
|
||||
|
||||
func (c *Cluster) validateContainers(containers []v1.Container) error {
|
||||
for i, container := range containers {
|
||||
if container.Name == "" {
|
||||
return fmt.Errorf("container[%d]: name is required", i)
|
||||
}
|
||||
if container.Image == "" {
|
||||
return fmt.Errorf("container '%v': image is required", container.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -72,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
subtest: "Patroni default configuration",
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "18"},
|
||||
patroni: &acidv1.Patroni{},
|
||||
opConfig: &config.Config{
|
||||
Auth: config.Auth{
|
||||
PamRoleName: "zalandos",
|
||||
},
|
||||
},
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/18/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
|
||||
},
|
||||
{
|
||||
subtest: "Patroni configured",
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "18"},
|
||||
patroni: &acidv1.Patroni{
|
||||
InitDB: map[string]string{
|
||||
"encoding": "UTF8",
|
||||
|
|
@ -102,38 +102,38 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
|
|||
FailsafeMode: util.True(),
|
||||
},
|
||||
opConfig: &config.Config{},
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/18/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
|
||||
},
|
||||
{
|
||||
subtest: "Patroni failsafe_mode configured globally",
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "18"},
|
||||
patroni: &acidv1.Patroni{},
|
||||
opConfig: &config.Config{
|
||||
EnablePatroniFailsafeMode: util.True(),
|
||||
},
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/18/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
|
||||
},
|
||||
{
|
||||
subtest: "Patroni failsafe_mode configured globally, disabled for cluster",
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "18"},
|
||||
patroni: &acidv1.Patroni{
|
||||
FailsafeMode: util.False(),
|
||||
},
|
||||
opConfig: &config.Config{
|
||||
EnablePatroniFailsafeMode: util.True(),
|
||||
},
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/18/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
|
||||
},
|
||||
{
|
||||
subtest: "Patroni failsafe_mode disabled globally, configured for cluster",
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
|
||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "18"},
|
||||
patroni: &acidv1.Patroni{
|
||||
FailsafeMode: util.True(),
|
||||
},
|
||||
opConfig: &config.Config{
|
||||
EnablePatroniFailsafeMode: util.False(),
|
||||
},
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
|
||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/18/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
|
@ -164,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
|
|||
},
|
||||
{
|
||||
subTest: "test current bin path against hard coded template",
|
||||
binPath: "/usr/lib/postgresql/17/bin",
|
||||
binPath: "/usr/lib/postgresql/18/bin",
|
||||
template: pgBinariesLocationTemplate,
|
||||
expected: "17",
|
||||
expected: "18",
|
||||
},
|
||||
{
|
||||
subTest: "test alternative bin path against a matching template",
|
||||
binPath: "/usr/pgsql-17/bin",
|
||||
binPath: "/usr/pgsql-18/bin",
|
||||
template: "/usr/pgsql-%v/bin",
|
||||
expected: "17",
|
||||
expected: "18",
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -1370,7 +1370,33 @@ func TestStandbyEnv(t *testing.T) {
|
|||
envLen: 2,
|
||||
},
|
||||
{
|
||||
subTest: "from remote primary - ignore WAL path",
|
||||
subTest: "from remote primary with S3 WAL path",
|
||||
standbyOpts: &acidv1.StandbyDescription{
|
||||
S3WalPath: "s3://some/path/",
|
||||
StandbyHost: "remote-primary",
|
||||
},
|
||||
env: v1.EnvVar{
|
||||
Name: "STANDBY_HOST",
|
||||
Value: "remote-primary",
|
||||
},
|
||||
envPos: 0,
|
||||
envLen: 4,
|
||||
},
|
||||
{
|
||||
subTest: "verify S3 WAL env with standby host",
|
||||
standbyOpts: &acidv1.StandbyDescription{
|
||||
S3WalPath: "s3://some/path/",
|
||||
StandbyHost: "remote-primary",
|
||||
},
|
||||
env: v1.EnvVar{
|
||||
Name: "STANDBY_WALE_S3_PREFIX",
|
||||
Value: "s3://some/path/",
|
||||
},
|
||||
envPos: 1,
|
||||
envLen: 4,
|
||||
},
|
||||
{
|
||||
subTest: "from remote primary with GCS WAL path",
|
||||
standbyOpts: &acidv1.StandbyDescription{
|
||||
GSWalPath: "gs://some/path/",
|
||||
StandbyHost: "remote-primary",
|
||||
|
|
@ -1380,7 +1406,20 @@ func TestStandbyEnv(t *testing.T) {
|
|||
Value: "remote-primary",
|
||||
},
|
||||
envPos: 0,
|
||||
envLen: 1,
|
||||
envLen: 4,
|
||||
},
|
||||
{
|
||||
subTest: "from remote primary with slot name",
|
||||
standbyOpts: &acidv1.StandbyDescription{
|
||||
StandbyHost: "remote-primary",
|
||||
StandbyPrimarySlotName: "my_slot",
|
||||
},
|
||||
env: v1.EnvVar{
|
||||
Name: "STANDBY_PRIMARY_SLOT_NAME",
|
||||
Value: "my_slot",
|
||||
},
|
||||
envPos: 1,
|
||||
envLen: 2,
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -1935,7 +1974,8 @@ func TestAdditionalVolume(t *testing.T) {
|
|||
AdditionalVolumes: additionalVolumes,
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: sidecarName,
|
||||
Name: sidecarName,
|
||||
DockerImage: "test-image",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -2148,7 +2188,7 @@ func TestSidecars(t *testing.T) {
|
|||
|
||||
spec = acidv1.PostgresSpec{
|
||||
PostgresqlParam: acidv1.PostgresqlParam{
|
||||
PgVersion: "17",
|
||||
PgVersion: "18",
|
||||
Parameters: map[string]string{
|
||||
"max_connections": "100",
|
||||
},
|
||||
|
|
@ -2163,10 +2203,12 @@ func TestSidecars(t *testing.T) {
|
|||
},
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: "cluster-specific-sidecar",
|
||||
Name: "cluster-specific-sidecar",
|
||||
DockerImage: "test-image",
|
||||
},
|
||||
{
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
DockerImage: "test-image",
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
|
||||
|
|
@ -2201,7 +2243,8 @@ func TestSidecars(t *testing.T) {
|
|||
},
|
||||
SidecarContainers: []v1.Container{
|
||||
{
|
||||
Name: "global-sidecar",
|
||||
Name: "global-sidecar",
|
||||
Image: "test-image",
|
||||
},
|
||||
// will be replaced by a cluster specific sidecar with the same name
|
||||
{
|
||||
|
|
@ -2271,6 +2314,7 @@ func TestSidecars(t *testing.T) {
|
|||
// cluster specific sidecar
|
||||
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
|
||||
Name: "cluster-specific-sidecar",
|
||||
Image: "test-image",
|
||||
Env: env,
|
||||
Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"),
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
|
|
@ -2297,6 +2341,7 @@ func TestSidecars(t *testing.T) {
|
|||
// global sidecar
|
||||
assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
|
||||
Name: "global-sidecar",
|
||||
Image: "test-image",
|
||||
Env: env,
|
||||
VolumeMounts: mounts,
|
||||
})
|
||||
|
|
@ -2325,6 +2370,180 @@ func TestSidecars(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestContainerValidation(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
spec acidv1.PostgresSpec
|
||||
clusterConfig Config
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "init container without image",
|
||||
spec: acidv1.PostgresSpec{
|
||||
PostgresqlParam: acidv1.PostgresqlParam{
|
||||
PgVersion: "18",
|
||||
},
|
||||
TeamID: "myapp",
|
||||
NumberOfInstances: 1,
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "invalid-initcontainer",
|
||||
},
|
||||
},
|
||||
},
|
||||
clusterConfig: Config{
|
||||
OpConfig: config.Config{
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "image is required",
|
||||
},
|
||||
{
|
||||
name: "sidecar without name",
|
||||
spec: acidv1.PostgresSpec{
|
||||
PostgresqlParam: acidv1.PostgresqlParam{
|
||||
PgVersion: "18",
|
||||
},
|
||||
TeamID: "myapp",
|
||||
NumberOfInstances: 1,
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
clusterConfig: Config{
|
||||
OpConfig: config.Config{
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
SidecarContainers: []v1.Container{
|
||||
{
|
||||
Image: "test-image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "name is required",
|
||||
},
|
||||
{
|
||||
name: "sidecar without image",
|
||||
spec: acidv1.PostgresSpec{
|
||||
PostgresqlParam: acidv1.PostgresqlParam{
|
||||
PgVersion: "18",
|
||||
},
|
||||
TeamID: "myapp",
|
||||
NumberOfInstances: 1,
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: "invalid-sidecar",
|
||||
},
|
||||
},
|
||||
},
|
||||
clusterConfig: Config{
|
||||
OpConfig: config.Config{
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "image is required",
|
||||
},
|
||||
{
|
||||
name: "valid containers pass validation",
|
||||
spec: acidv1.PostgresSpec{
|
||||
PostgresqlParam: acidv1.PostgresqlParam{
|
||||
PgVersion: "18",
|
||||
},
|
||||
TeamID: "myapp",
|
||||
NumberOfInstances: 1,
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: "valid-sidecar",
|
||||
DockerImage: "busybox:latest",
|
||||
},
|
||||
},
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "valid-initcontainer",
|
||||
Image: "alpine:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
clusterConfig: Config{
|
||||
OpConfig: config.Config{
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "multiple invalid sidecars",
|
||||
spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: "sidecar1",
|
||||
},
|
||||
{
|
||||
Name: "sidecar2",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "image is required",
|
||||
},
|
||||
{
|
||||
name: "empty container name and image",
|
||||
spec: acidv1.PostgresSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "",
|
||||
Image: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: "name is required",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cluster := New(tc.clusterConfig, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
|
||||
_, err := cluster.generateStatefulSet(&tc.spec)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tc.expectedError)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneratePodDisruptionBudget(t *testing.T) {
|
||||
testName := "Test PodDisruptionBudget spec generation"
|
||||
|
||||
|
|
@ -2618,7 +2837,8 @@ func TestGenerateService(t *testing.T) {
|
|||
Name: "cluster-specific-sidecar",
|
||||
},
|
||||
{
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
DockerImage: "test-image",
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
|
||||
|
|
@ -2928,6 +3148,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
namespace := "default"
|
||||
clusterNameLabel := "cluster-name"
|
||||
sidecarName := "postgres-exporter"
|
||||
dockerImage := "test-image"
|
||||
|
||||
// enforceMinResourceLimits will be called 2 times emitting 4 events (2x cpu, 2x memory raise)
|
||||
// enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap)
|
||||
|
|
@ -2948,6 +3169,9 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
PodRoleLabel: "spilo-role",
|
||||
}
|
||||
|
||||
configWithEnabledIgnoreResourcesLimits := configResources
|
||||
configWithEnabledIgnoreResourcesLimits.IgnoreResourcesLimitsAnnotationKey = "zalando.org/ignore-resources-limits"
|
||||
|
||||
tests := []struct {
|
||||
subTest string
|
||||
config config.Config
|
||||
|
|
@ -2993,7 +3217,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: sidecarName,
|
||||
Name: sidecarName,
|
||||
DockerImage: dockerImage,
|
||||
},
|
||||
},
|
||||
TeamID: "acid",
|
||||
|
|
@ -3232,7 +3457,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: sidecarName,
|
||||
Name: sidecarName,
|
||||
DockerImage: dockerImage,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||
|
|
@ -3281,14 +3507,15 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
{
|
||||
subTest: "test enforcing min cpu and memory limit",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
Resources: configWithEnabledIgnoreResourcesLimits,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "false"},
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
|
|
@ -3306,6 +3533,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("250Mi")},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "ingnore min cpu and memory limit threshold",
|
||||
config: config.Config{
|
||||
Resources: configWithEnabledIgnoreResourcesLimits,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test min cpu and memory limit are not enforced on sidecar",
|
||||
config: config.Config{
|
||||
|
|
@ -3321,7 +3577,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
{
|
||||
Name: sidecarName,
|
||||
Name: sidecarName,
|
||||
DockerImage: dockerImage,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||
|
|
@ -3342,14 +3599,15 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
{
|
||||
subTest: "test enforcing max cpu and memory requests",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
Resources: configWithEnabledIgnoreResourcesLimits,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "yes"},
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
|
|
@ -3367,6 +3625,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
|||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "ignore max cpu and memory requests limit",
|
||||
config: config.Config{
|
||||
Resources: configWithEnabledIgnoreResourcesLimits,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test SetMemoryRequestToLimit flag but raise only until max memory request",
|
||||
config: config.Config{
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
|
|
@ -14,16 +15,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// VersionMap Map of version numbers
|
||||
var VersionMap = map[string]int{
|
||||
"12": 120000,
|
||||
"13": 130000,
|
||||
"14": 140000,
|
||||
"15": 150000,
|
||||
"16": 160000,
|
||||
"17": 170000,
|
||||
}
|
||||
|
||||
const (
|
||||
majorVersionUpgradeSuccessAnnotation = "last-major-upgrade-success"
|
||||
majorVersionUpgradeFailureAnnotation = "last-major-upgrade-failure"
|
||||
|
|
@ -31,21 +22,22 @@ const (
|
|||
|
||||
// IsBiggerPostgresVersion Compare two Postgres version numbers
|
||||
func IsBiggerPostgresVersion(old string, new string) bool {
|
||||
oldN := VersionMap[old]
|
||||
newN := VersionMap[new]
|
||||
oldN, _ := strconv.Atoi(old)
|
||||
newN, _ := strconv.Atoi(new)
|
||||
return newN > oldN
|
||||
}
|
||||
|
||||
// GetDesiredMajorVersionAsInt Convert string to comparable integer of PG version
|
||||
func (c *Cluster) GetDesiredMajorVersionAsInt() int {
|
||||
return VersionMap[c.GetDesiredMajorVersion()]
|
||||
version, _ := strconv.Atoi(c.GetDesiredMajorVersion())
|
||||
return version * 10000
|
||||
}
|
||||
|
||||
// GetDesiredMajorVersion returns major version to use, incl. potential auto upgrade
|
||||
func (c *Cluster) GetDesiredMajorVersion() string {
|
||||
|
||||
if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
|
||||
// e.g. current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded
|
||||
// e.g. current is 14, minimal is 14 allowing 14 to 18 clusters, everything below is upgraded
|
||||
if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
|
||||
c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
|
||||
return c.Config.OpConfig.TargetMajorVersion
|
||||
|
|
@ -198,7 +190,7 @@ func (c *Cluster) majorVersionUpgrade() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
|
||||
if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
|
||||
c.logger.Infof("skipping major version upgrade, not in maintenance window")
|
||||
return nil
|
||||
}
|
||||
|
|
@ -237,7 +229,7 @@ func (c *Cluster) majorVersionUpgrade() error {
|
|||
|
||||
isUpgradeSuccess := true
|
||||
numberOfPods := len(pods)
|
||||
if allRunning && masterPod != nil {
|
||||
if allRunning {
|
||||
c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
|
||||
if c.currentMajorVersion < desiredVersion {
|
||||
defer func() error {
|
||||
|
|
@ -276,8 +268,12 @@ func (c *Cluster) majorVersionUpgrade() error {
|
|||
if err != nil {
|
||||
isUpgradeSuccess = false
|
||||
c.annotatePostgresResource(isUpgradeSuccess)
|
||||
c.logger.Errorf("upgrade action triggered but command failed: %v", err)
|
||||
if strings.TrimSpace(scriptErrMsg) == "" {
|
||||
scriptErrMsg = err.Error()
|
||||
}
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg)
|
||||
return fmt.Errorf(scriptErrMsg)
|
||||
return fmt.Errorf("%s", scriptErrMsg)
|
||||
}
|
||||
|
||||
c.annotatePostgresResource(isUpgradeSuccess)
|
||||
|
|
|
|||
|
|
@ -3,12 +3,11 @@ package cluster
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
|
@ -281,7 +280,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
|||
}
|
||||
|
||||
scheduleSwitchover := false
|
||||
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
|
||||
if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
|
||||
c.logger.Infof("postponing switchover, not in maintenance window")
|
||||
scheduleSwitchover = true
|
||||
}
|
||||
|
|
@ -433,9 +432,10 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
|
|||
}
|
||||
|
||||
newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
|
||||
if newRole == Replica {
|
||||
switch newRole {
|
||||
case Replica:
|
||||
replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
|
||||
} else if newRole == Master {
|
||||
case Master:
|
||||
newMasterPod = newPod
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -94,12 +94,12 @@ func (c *Cluster) listResources() error {
|
|||
func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
|
||||
c.setProcessName("creating statefulset")
|
||||
// check if it's allowed that spec contains initContainers
|
||||
if c.Spec.InitContainers != nil && len(c.Spec.InitContainers) > 0 &&
|
||||
if len(c.Spec.InitContainers) > 0 &&
|
||||
c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
|
||||
return nil, fmt.Errorf("initContainers specified but disabled in configuration")
|
||||
}
|
||||
// check if it's allowed that spec contains sidecars
|
||||
if c.Spec.Sidecars != nil && len(c.Spec.Sidecars) > 0 &&
|
||||
if len(c.Spec.Sidecars) > 0 &&
|
||||
c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
|
||||
return nil, fmt.Errorf("sidecar containers specified but disabled in configuration")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,10 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -15,11 +17,10 @@ import (
|
|||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
"github.com/zalando/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
|
@ -40,23 +41,28 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
|||
defer c.mu.Unlock()
|
||||
|
||||
oldSpec := c.Postgresql
|
||||
|
||||
if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
|
||||
// do not apply any major version related changes yet
|
||||
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
|
||||
}
|
||||
|
||||
c.setSpec(newSpec)
|
||||
|
||||
defer func() {
|
||||
var (
|
||||
pgUpdatedStatus *acidv1.Postgresql
|
||||
errStatus error
|
||||
)
|
||||
if err != nil {
|
||||
c.logger.Warningf("error while syncing cluster state: %v", err)
|
||||
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed)
|
||||
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
|
||||
} else if !c.Status.Running() {
|
||||
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
|
||||
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
|
||||
}
|
||||
if errStatus != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", errStatus)
|
||||
}
|
||||
if pgUpdatedStatus != nil {
|
||||
|
||||
if !equality.Semantic.DeepEqual(oldSpec.Status, newSpec.Status) {
|
||||
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set cluster status: %v", err)
|
||||
return
|
||||
}
|
||||
c.setSpec(pgUpdatedStatus)
|
||||
}
|
||||
}()
|
||||
|
|
@ -97,11 +103,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
|||
}
|
||||
}
|
||||
|
||||
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
|
||||
// do not apply any major version related changes yet
|
||||
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
|
||||
}
|
||||
|
||||
if err = c.syncStatefulSet(); err != nil {
|
||||
if !k8sutil.ResourceAlreadyExists(err) {
|
||||
err = fmt.Errorf("could not sync statefulsets: %v", err)
|
||||
|
|
@ -1030,6 +1031,23 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
|
|||
standbyOptionsToSet["create_replica_methods"] = []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"}
|
||||
standbyOptionsToSet["restore_command"] = "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""
|
||||
|
||||
if c.Spec.StandbyCluster.StandbyHost != "" {
|
||||
standbyOptionsToSet["host"] = c.Spec.StandbyCluster.StandbyHost
|
||||
} else {
|
||||
standbyOptionsToSet["host"] = nil
|
||||
}
|
||||
|
||||
if c.Spec.StandbyCluster.StandbyPort != "" {
|
||||
standbyOptionsToSet["port"] = c.Spec.StandbyCluster.StandbyPort
|
||||
} else {
|
||||
standbyOptionsToSet["port"] = nil
|
||||
}
|
||||
|
||||
if c.Spec.StandbyCluster.StandbyPrimarySlotName != "" {
|
||||
standbyOptionsToSet["primary_slot_name"] = c.Spec.StandbyCluster.StandbyPrimarySlotName
|
||||
} else {
|
||||
standbyOptionsToSet["primary_slot_name"] = nil
|
||||
}
|
||||
} else {
|
||||
c.logger.Infof("promoting standby cluster and detach from source")
|
||||
standbyOptionsToSet = nil
|
||||
|
|
@ -1059,40 +1077,45 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
|
|||
func (c *Cluster) syncSecrets() error {
|
||||
c.logger.Debug("syncing secrets")
|
||||
c.setProcessName("syncing secrets")
|
||||
errors := make([]string, 0)
|
||||
generatedSecrets := c.generateUserSecrets()
|
||||
retentionUsers := make([]string, 0)
|
||||
currentTime := time.Now()
|
||||
|
||||
for secretUsername, generatedSecret := range generatedSecrets {
|
||||
secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
|
||||
pgUserDegraded := false
|
||||
createdSecret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
|
||||
if err == nil {
|
||||
c.Secrets[secret.UID] = secret
|
||||
c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
|
||||
c.Secrets[createdSecret.UID] = createdSecret
|
||||
c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(createdSecret.ObjectMeta), generatedSecret.Namespace, createdSecret.UID)
|
||||
continue
|
||||
}
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
if err = c.updateSecret(secretUsername, generatedSecret, &retentionUsers, currentTime); err != nil {
|
||||
c.logger.Warningf("syncing secret %s failed: %v", util.NameFromMeta(secret.ObjectMeta), err)
|
||||
updatedSecret, err := c.updateSecret(secretUsername, generatedSecret, &retentionUsers, currentTime)
|
||||
if err == nil {
|
||||
c.Secrets[updatedSecret.UID] = updatedSecret
|
||||
continue
|
||||
}
|
||||
errors = append(errors, fmt.Sprintf("syncing secret %s failed: %v", util.NameFromMeta(generatedSecret.ObjectMeta), err))
|
||||
pgUserDegraded = true
|
||||
} else {
|
||||
return fmt.Errorf("could not create secret for user %s: in namespace %s: %v", secretUsername, generatedSecret.Namespace, err)
|
||||
errors = append(errors, fmt.Sprintf("could not create secret for user %s: in namespace %s: %v", secretUsername, generatedSecret.Namespace, err))
|
||||
pgUserDegraded = true
|
||||
}
|
||||
c.updatePgUser(secretUsername, pgUserDegraded)
|
||||
}
|
||||
|
||||
// remove rotation users that exceed the retention interval
|
||||
if len(retentionUsers) > 0 {
|
||||
err := c.initDbConn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not init db connection: %v", err)
|
||||
}
|
||||
if err = c.cleanupRotatedUsers(retentionUsers, c.pgDb); err != nil {
|
||||
return fmt.Errorf("error removing users exceeding configured retention interval: %v", err)
|
||||
}
|
||||
if err := c.closeDbConn(); err != nil {
|
||||
c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err)
|
||||
if err := c.cleanupRotatedUsers(retentionUsers); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("error removing users exceeding configured retention interval: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("%v", strings.Join(errors, `', '`))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -1105,7 +1128,7 @@ func (c *Cluster) updateSecret(
|
|||
secretUsername string,
|
||||
generatedSecret *v1.Secret,
|
||||
retentionUsers *[]string,
|
||||
currentTime time.Time) error {
|
||||
currentTime time.Time) (*v1.Secret, error) {
|
||||
var (
|
||||
secret *v1.Secret
|
||||
err error
|
||||
|
|
@ -1115,20 +1138,21 @@ func (c *Cluster) updateSecret(
|
|||
|
||||
// get the secret first
|
||||
if secret, err = c.KubeClient.Secrets(generatedSecret.Namespace).Get(context.TODO(), generatedSecret.Name, metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("could not get current secret: %v", err)
|
||||
return generatedSecret, fmt.Errorf("could not get current secret: %v", err)
|
||||
}
|
||||
c.Secrets[secret.UID] = secret
|
||||
|
||||
// fetch user map to update later
|
||||
var userMap map[string]spec.PgUser
|
||||
var userKey string
|
||||
if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
|
||||
switch secretUsername {
|
||||
case c.systemUsers[constants.SuperuserKeyName].Name:
|
||||
userKey = constants.SuperuserKeyName
|
||||
userMap = c.systemUsers
|
||||
} else if secretUsername == c.systemUsers[constants.ReplicationUserKeyName].Name {
|
||||
case c.systemUsers[constants.ReplicationUserKeyName].Name:
|
||||
userKey = constants.ReplicationUserKeyName
|
||||
userMap = c.systemUsers
|
||||
} else {
|
||||
default:
|
||||
userKey = secretUsername
|
||||
userMap = c.pgUsers
|
||||
}
|
||||
|
|
@ -1151,37 +1175,15 @@ func (c *Cluster) updateSecret(
|
|||
pwdUser := userMap[userKey]
|
||||
secretName := util.NameFromMeta(secret.ObjectMeta)
|
||||
|
||||
// if password rotation is enabled update password and username if rotation interval has been passed
|
||||
// rotation can be enabled globally or via the manifest (excluding the Postgres superuser)
|
||||
rotationEnabledInManifest := secretUsername != constants.SuperuserKeyName &&
|
||||
(slices.Contains(c.Spec.UsersWithSecretRotation, secretUsername) ||
|
||||
slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername))
|
||||
|
||||
// globally enabled rotation is only allowed for manifest and bootstrapped roles
|
||||
allowedRoleTypes := []spec.RoleOrigin{spec.RoleOriginManifest, spec.RoleOriginBootstrap}
|
||||
rotationAllowed := !pwdUser.IsDbOwner && slices.Contains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil
|
||||
|
||||
// users can ignore any kind of rotation
|
||||
isIgnoringRotation := slices.Contains(c.Spec.UsersIgnoringSecretRotation, secretUsername)
|
||||
|
||||
if ((c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest) && !isIgnoringRotation {
|
||||
updateSecretMsg, err = c.rotatePasswordInSecret(secret, secretUsername, pwdUser.Origin, currentTime, retentionUsers)
|
||||
// do not perform any rotation of reset for standby clusters
|
||||
if !isStandbyCluster(&c.Spec) {
|
||||
updateSecretMsg, err = c.checkForPasswordRotation(secret, secretUsername, pwdUser, retentionUsers, currentTime)
|
||||
if err != nil {
|
||||
c.logger.Warnf("password rotation failed for user %s: %v", secretUsername, err)
|
||||
return nil, fmt.Errorf("error while checking for password rotation: %v", err)
|
||||
}
|
||||
if updateSecretMsg != "" {
|
||||
updateSecret = true
|
||||
}
|
||||
} else {
|
||||
// username might not match if password rotation has been disabled again
|
||||
if secretUsername != string(secret.Data["username"]) {
|
||||
*retentionUsers = append(*retentionUsers, secretUsername)
|
||||
secret.Data["username"] = []byte(secretUsername)
|
||||
secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
|
||||
secret.Data["nextRotation"] = []byte{}
|
||||
updateSecret = true
|
||||
updateSecretMsg = fmt.Sprintf("secret %s does not contain the role %s - updating username and resetting password", secretName, secretUsername)
|
||||
}
|
||||
}
|
||||
|
||||
// if this secret belongs to the infrastructure role and the password has changed - replace it in the secret
|
||||
|
|
@ -1208,26 +1210,73 @@ func (c *Cluster) updateSecret(
|
|||
}
|
||||
|
||||
if updateSecret {
|
||||
c.logger.Infof(updateSecretMsg)
|
||||
c.logger.Infof("%s", updateSecretMsg)
|
||||
if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
|
||||
return fmt.Errorf("could not update secret %s: %v", secretName, err)
|
||||
return nil, fmt.Errorf("could not update secret: %v", err)
|
||||
}
|
||||
c.Secrets[secret.UID] = secret
|
||||
}
|
||||
|
||||
if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed {
|
||||
patchData, err := metaAnnotationsPatch(generatedSecret.Annotations)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err)
|
||||
return nil, fmt.Errorf("could not form patch for secret annotations: %v", err)
|
||||
}
|
||||
secret, err = c.KubeClient.Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not patch annotations for secret %q: %v", secret.Name, err)
|
||||
return nil, fmt.Errorf("could not patch annotations for secret: %v", err)
|
||||
}
|
||||
c.Secrets[secret.UID] = secret
|
||||
}
|
||||
|
||||
return nil
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) checkForPasswordRotation(
|
||||
secret *v1.Secret,
|
||||
secretUsername string,
|
||||
pwdUser spec.PgUser,
|
||||
retentionUsers *[]string,
|
||||
currentTime time.Time) (string, error) {
|
||||
|
||||
var (
|
||||
passwordRotationMsg string
|
||||
err error
|
||||
)
|
||||
|
||||
// if password rotation is enabled update password and username if rotation interval has been passed
|
||||
// rotation can be enabled globally or via the manifest (excluding the Postgres superuser)
|
||||
rotationEnabledInManifest := secretUsername != constants.SuperuserKeyName &&
|
||||
(slices.Contains(c.Spec.UsersWithSecretRotation, secretUsername) ||
|
||||
slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername))
|
||||
|
||||
// globally enabled rotation is only allowed for manifest and bootstrapped roles
|
||||
allowedRoleTypes := []spec.RoleOrigin{spec.RoleOriginManifest, spec.RoleOriginBootstrap}
|
||||
rotationAllowed := !pwdUser.IsDbOwner && slices.Contains(allowedRoleTypes, pwdUser.Origin)
|
||||
|
||||
// users can ignore any kind of rotation
|
||||
isIgnoringRotation := slices.Contains(c.Spec.UsersIgnoringSecretRotation, secretUsername)
|
||||
|
||||
if ((c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest) && !isIgnoringRotation {
|
||||
passwordRotationMsg, err = c.rotatePasswordInSecret(secret, secretUsername, pwdUser.Origin, currentTime, retentionUsers)
|
||||
if err != nil {
|
||||
c.logger.Warnf("password rotation failed for user %s: %v", secretUsername, err)
|
||||
}
|
||||
} else {
|
||||
// username might not match if password rotation has been disabled again
|
||||
usernameFromSecret := string(secret.Data["username"])
|
||||
if secretUsername != usernameFromSecret {
|
||||
// handle edge case when manifest user conflicts with a user from prepared databases
|
||||
if strings.Replace(usernameFromSecret, "-", "_", -1) == strings.Replace(secretUsername, "-", "_", -1) {
|
||||
return "", fmt.Errorf("could not update secret because of user name mismatch: expected: %s, got: %s", secretUsername, usernameFromSecret)
|
||||
}
|
||||
*retentionUsers = append(*retentionUsers, secretUsername)
|
||||
secret.Data["username"] = []byte(secretUsername)
|
||||
secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
|
||||
secret.Data["nextRotation"] = []byte{}
|
||||
passwordRotationMsg = fmt.Sprintf("secret does not contain the role %s - updating username and resetting password", secretUsername)
|
||||
}
|
||||
}
|
||||
|
||||
return passwordRotationMsg, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) rotatePasswordInSecret(
|
||||
|
|
@ -1333,6 +1382,23 @@ func (c *Cluster) rotatePasswordInSecret(
|
|||
return updateSecretMsg, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) updatePgUser(secretUsername string, degraded bool) {
|
||||
for key, pgUser := range c.pgUsers {
|
||||
if pgUser.Name == secretUsername {
|
||||
pgUser.Degraded = degraded
|
||||
c.pgUsers[key] = pgUser
|
||||
return
|
||||
}
|
||||
}
|
||||
for key, pgUser := range c.systemUsers {
|
||||
if pgUser.Name == secretUsername {
|
||||
pgUser.Degraded = degraded
|
||||
c.systemUsers[key] = pgUser
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) syncRoles() (err error) {
|
||||
c.setProcessName("syncing roles")
|
||||
|
||||
|
|
|
|||
|
|
@ -2,20 +2,22 @@ package cluster
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"context"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zalando/postgres-operator/mocks"
|
||||
|
|
@ -51,6 +53,16 @@ func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
|
|||
}
|
||||
|
||||
func newFakeK8sSyncSecretsClient() (k8sutil.KubernetesClient, *fake.Clientset) {
|
||||
// add a reactor that checks namespace existence before creating secrets
|
||||
clientSet.PrependReactor("create", "secrets", func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
createAction := action.(k8stesting.CreateAction)
|
||||
secret := createAction.GetObject().(*v1.Secret)
|
||||
if secret.Namespace != "default" {
|
||||
return true, nil, errors.New("namespace does not exist")
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
return k8sutil.KubernetesClient{
|
||||
SecretsGetter: clientSet.CoreV1(),
|
||||
}, clientSet
|
||||
|
|
@ -789,6 +801,41 @@ func TestSyncStandbyClusterConfiguration(t *testing.T) {
|
|||
// this should update the Patroni config again
|
||||
err = cluster.syncStandbyClusterConfiguration()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// test with standby_host, standby_port and standby_primary_slot_name
|
||||
cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
|
||||
StandbyHost: "remote-primary.example.com",
|
||||
StandbyPort: "5433",
|
||||
StandbyPrimarySlotName: "standby_slot",
|
||||
}
|
||||
cluster.syncStatefulSet()
|
||||
updatedSts4 := cluster.Statefulset
|
||||
|
||||
// check that pods have all three STANDBY_* environment variables
|
||||
assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_HOST", Value: "remote-primary.example.com"})
|
||||
assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PORT", Value: "5433"})
|
||||
assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PRIMARY_SLOT_NAME", Value: "standby_slot"})
|
||||
|
||||
// this should update the Patroni config with host, port and primary_slot_name
|
||||
err = cluster.syncStandbyClusterConfiguration()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// test property deletion: remove standby_primary_slot_name
|
||||
cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
|
||||
StandbyHost: "remote-primary.example.com",
|
||||
StandbyPort: "5433",
|
||||
}
|
||||
cluster.syncStatefulSet()
|
||||
updatedSts5 := cluster.Statefulset
|
||||
|
||||
// check that STANDBY_PRIMARY_SLOT_NAME is not present
|
||||
assert.Contains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_HOST", Value: "remote-primary.example.com"})
|
||||
assert.Contains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PORT", Value: "5433"})
|
||||
assert.NotContains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PRIMARY_SLOT_NAME", Value: "standby_slot"})
|
||||
|
||||
// this should update the Patroni config and set primary_slot_name to nil
|
||||
err = cluster.syncStandbyClusterConfiguration()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUpdateSecret(t *testing.T) {
|
||||
|
|
@ -811,7 +858,7 @@ func TestUpdateSecret(t *testing.T) {
|
|||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Databases: map[string]string{dbname: dbowner},
|
||||
Users: map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}},
|
||||
Users: map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}, "not-exist.test_user": {}},
|
||||
UsersIgnoringSecretRotation: []string{"bar"},
|
||||
UsersWithInPlaceSecretRotation: []string{dbowner},
|
||||
Streams: []acidv1.Stream{
|
||||
|
|
@ -843,6 +890,7 @@ func TestUpdateSecret(t *testing.T) {
|
|||
PasswordRotationInterval: 1,
|
||||
PasswordRotationUserRetention: 3,
|
||||
},
|
||||
EnableCrossNamespaceSecret: true,
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
|
|
@ -865,7 +913,9 @@ func TestUpdateSecret(t *testing.T) {
|
|||
|
||||
allUsers := make(map[string]spec.PgUser)
|
||||
for _, pgUser := range cluster.pgUsers {
|
||||
allUsers[pgUser.Name] = pgUser
|
||||
if !pgUser.Degraded {
|
||||
allUsers[pgUser.Name] = pgUser
|
||||
}
|
||||
}
|
||||
for _, systemUser := range cluster.systemUsers {
|
||||
allUsers[systemUser.Name] = systemUser
|
||||
|
|
@ -949,3 +999,57 @@ func TestUpdateSecret(t *testing.T) {
|
|||
t.Errorf("%s: updated secret does not contain expected username: expected %s, got %s", testName, appUser, currentUsername)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateSecretNameConflict(t *testing.T) {
|
||||
client, _ := newFakeK8sSyncSecretsClient()
|
||||
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
|
||||
|
||||
// define manifest user that has the same name as a prepared database owner user except for dashes vs underscores
|
||||
// because of this the operator cannot create both secrets because underscores are not allowed in k8s secret names
|
||||
pg := acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
PreparedDatabases: map[string]acidv1.PreparedDatabase{"prepared": {DefaultUsers: true}},
|
||||
Users: map[string]acidv1.UserFlags{"prepared-owner-user": {}},
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1Gi",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
Auth: config.Auth{
|
||||
SuperUsername: "postgres",
|
||||
ReplicationUsername: "standby",
|
||||
SecretNameTemplate: secretTemplate,
|
||||
},
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
},
|
||||
},
|
||||
}, client, pg, logger, eventRecorder)
|
||||
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
cluster.pgUsers = map[string]spec.PgUser{}
|
||||
|
||||
// init all users
|
||||
cluster.initUsers()
|
||||
// create secrets and fail because of user name mismatch
|
||||
// prepared-owner-user from manifest vs prepared_owner_user from prepared database
|
||||
err := cluster.syncSecrets()
|
||||
assert.Error(t, err)
|
||||
|
||||
// the order of secrets to sync is not deterministic, check only first part of the error message
|
||||
expectedError := fmt.Sprintf("syncing secret %s failed: error while checking for password rotation: could not update secret because of user name mismatch", "default/prepared-owner-user.acid-test-cluster.credentials")
|
||||
assert.Contains(t, err.Error(), expectedError)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -257,9 +258,9 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
|
|||
if teamID == "" {
|
||||
msg := "no teamId specified"
|
||||
if c.OpConfig.EnableTeamIdClusternamePrefix {
|
||||
return nil, fmt.Errorf(msg)
|
||||
return nil, fmt.Errorf("%s", msg)
|
||||
}
|
||||
c.logger.Warnf(msg)
|
||||
c.logger.Warnf("%s", msg)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
|
@ -663,15 +664,40 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
|
|||
return resources, nil
|
||||
}
|
||||
|
||||
func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
|
||||
if len(specMaintenanceWindows) == 0 {
|
||||
func isStandbyCluster(spec *acidv1.PostgresSpec) bool {
|
||||
for _, env := range spec.Env {
|
||||
hasStandbyEnv, _ := regexp.MatchString(`^STANDBY_WALE_(S3|GS|GSC|SWIFT)_PREFIX$`, env.Name)
|
||||
if hasStandbyEnv && env.Value != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return spec.StandbyCluster != nil
|
||||
}
|
||||
|
||||
func (c *Cluster) isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
|
||||
ignoreMaintenanceWindows := c.OpConfig.EnableMaintenanceWindows != nil && !*c.OpConfig.EnableMaintenanceWindows
|
||||
noWindowsDefined := len(specMaintenanceWindows) == 0 && len(c.OpConfig.MaintenanceWindows) == 0
|
||||
if noWindowsDefined || ignoreMaintenanceWindows {
|
||||
return true
|
||||
}
|
||||
now := time.Now()
|
||||
currentDay := now.Weekday()
|
||||
currentTime := now.Format("15:04")
|
||||
|
||||
for _, window := range specMaintenanceWindows {
|
||||
maintenanceWindows := specMaintenanceWindows
|
||||
if len(maintenanceWindows) == 0 {
|
||||
maintenanceWindows = make([]acidv1.MaintenanceWindow, 0, len(c.OpConfig.MaintenanceWindows))
|
||||
for _, windowStr := range c.OpConfig.MaintenanceWindows {
|
||||
var window acidv1.MaintenanceWindow
|
||||
if err := window.UnmarshalJSON([]byte(windowStr)); err != nil {
|
||||
c.logger.Errorf("could not parse default maintenance window %q: %v", windowStr, err)
|
||||
continue
|
||||
}
|
||||
maintenanceWindows = append(maintenanceWindows, window)
|
||||
}
|
||||
}
|
||||
|
||||
for _, window := range maintenanceWindows {
|
||||
startTime := window.StartTime.Format("15:04")
|
||||
endTime := window.EndTime.Format("15:04")
|
||||
|
||||
|
|
|
|||
|
|
@ -288,6 +288,12 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
|
|||
},
|
||||
}
|
||||
|
||||
// add postgresql cluster to fake client
|
||||
_, err := client.PostgresqlsGetter.Postgresqls(namespace).Create(context.TODO(), &pg, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
|
|
@ -321,7 +327,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
|
|||
}, client, pg, logger, eventRecorder)
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
_, err := cluster.createStatefulSet()
|
||||
_, err = cluster.createStatefulSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -651,6 +657,23 @@ func Test_trimCronjobName(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsInMaintenanceWindow(t *testing.T) {
|
||||
cluster := New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
EnableMaintenanceWindows: util.True(),
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
DefaultCPURequest: "300m",
|
||||
DefaultCPULimit: "300m",
|
||||
DefaultMemoryRequest: "300Mi",
|
||||
DefaultMemoryLimit: "300Mi",
|
||||
},
|
||||
},
|
||||
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
|
||||
now := time.Now()
|
||||
futureTimeStart := now.Add(1 * time.Hour)
|
||||
futureTimeStartFormatted := futureTimeStart.Format("15:04")
|
||||
|
|
@ -658,14 +681,31 @@ func TestIsInMaintenanceWindow(t *testing.T) {
|
|||
futureTimeEndFormatted := futureTimeEnd.Format("15:04")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
windows []acidv1.MaintenanceWindow
|
||||
expected bool
|
||||
name string
|
||||
windows []acidv1.MaintenanceWindow
|
||||
configWindows []string
|
||||
windowsFlag bool
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no maintenance windows",
|
||||
windows: nil,
|
||||
expected: true,
|
||||
name: "no maintenance windows",
|
||||
windows: nil,
|
||||
configWindows: nil,
|
||||
windowsFlag: true,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "maintenance windows diabled",
|
||||
windows: []acidv1.MaintenanceWindow{
|
||||
{
|
||||
Everyday: true,
|
||||
StartTime: mustParseTime("00:00"),
|
||||
EndTime: mustParseTime("23:59"),
|
||||
},
|
||||
},
|
||||
configWindows: nil,
|
||||
windowsFlag: false,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "maintenance windows with everyday",
|
||||
|
|
@ -676,7 +716,9 @@ func TestIsInMaintenanceWindow(t *testing.T) {
|
|||
EndTime: mustParseTime("23:59"),
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
configWindows: nil,
|
||||
windowsFlag: true,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "maintenance windows with weekday",
|
||||
|
|
@ -687,7 +729,9 @@ func TestIsInMaintenanceWindow(t *testing.T) {
|
|||
EndTime: mustParseTime("23:59"),
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
configWindows: nil,
|
||||
windowsFlag: true,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "maintenance windows with future interval time",
|
||||
|
|
@ -698,14 +742,38 @@ func TestIsInMaintenanceWindow(t *testing.T) {
|
|||
EndTime: mustParseTime(futureTimeEndFormatted),
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
windowsFlag: true,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "global maintenance windows with future interval time",
|
||||
windows: nil,
|
||||
configWindows: []string{fmt.Sprintf("%s-%s", futureTimeStartFormatted, futureTimeEndFormatted)},
|
||||
windowsFlag: true,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "global maintenance windows all day",
|
||||
windows: nil,
|
||||
configWindows: []string{"00:00-02:00", "02:00-23:59"},
|
||||
windowsFlag: true,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "global maintenance windows ignored",
|
||||
windows: nil,
|
||||
configWindows: []string{"00:00-02:00", "02:00-23:59"},
|
||||
windowsFlag: false,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cluster.OpConfig.EnableMaintenanceWindows = &tt.windowsFlag
|
||||
cluster.OpConfig.MaintenanceWindows = tt.configWindows
|
||||
cluster.Spec.MaintenanceWindows = tt.windows
|
||||
if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
|
||||
if cluster.isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
|
||||
t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
|
||||
}
|
||||
})
|
||||
|
|
|
|||
|
|
@ -129,7 +129,7 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
|
|||
|
||||
if len(errors) > 0 {
|
||||
for _, s := range errors {
|
||||
c.logger.Warningf(s)
|
||||
c.logger.Warningf("%s", s)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -347,9 +347,11 @@ func (c *Controller) initController() {
|
|||
logMultiLineConfig(c.logger, c.opConfig.MustMarshal())
|
||||
|
||||
roleDefs := c.getInfrastructureRoleDefinitions()
|
||||
if infraRoles, err := c.getInfrastructureRoles(roleDefs); err != nil {
|
||||
c.logger.Warningf("could not get infrastructure roles: %v", err)
|
||||
} else {
|
||||
infraRoles, err := c.getInfrastructureRoles(roleDefs)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not get all infrastructure roles: %v", err)
|
||||
}
|
||||
if len(infraRoles) > 0 {
|
||||
c.config.InfrastructureRoles = infraRoles
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,17 +39,29 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
|
|||
result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
|
||||
result.EtcdHost = fromCRD.EtcdHost
|
||||
result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
|
||||
result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
|
||||
result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-18:4.1-p1")
|
||||
result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
|
||||
result.MinInstances = fromCRD.MinInstances
|
||||
result.MaxInstances = fromCRD.MaxInstances
|
||||
result.IgnoreInstanceLimitsAnnotationKey = fromCRD.IgnoreInstanceLimitsAnnotationKey
|
||||
result.IgnoreResourcesLimitsAnnotationKey = fromCRD.IgnoreResourcesLimitsAnnotationKey
|
||||
result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m")
|
||||
result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m")
|
||||
result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
|
||||
result.ShmVolume = util.CoalesceBool(fromCRD.ShmVolume, util.True())
|
||||
result.SidecarImages = fromCRD.SidecarImages
|
||||
result.SidecarContainers = fromCRD.SidecarContainers
|
||||
result.EnableMaintenanceWindows = util.CoalesceBool(fromCRD.EnableMaintenanceWindows, util.True())
|
||||
if len(fromCRD.MaintenanceWindows) > 0 {
|
||||
result.MaintenanceWindows = make([]string, 0, len(fromCRD.MaintenanceWindows))
|
||||
for _, window := range fromCRD.MaintenanceWindows {
|
||||
w, err := window.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("could not marshal configured maintenance window: %v", err))
|
||||
}
|
||||
result.MaintenanceWindows = append(result.MaintenanceWindows, string(w))
|
||||
}
|
||||
}
|
||||
|
||||
// user config
|
||||
result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")
|
||||
|
|
@ -62,8 +74,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
|
|||
// major version upgrade config
|
||||
result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual")
|
||||
result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
|
||||
result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13")
|
||||
result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17")
|
||||
result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "14")
|
||||
result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "18")
|
||||
|
||||
// kubernetes config
|
||||
result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False())
|
||||
|
|
@ -180,7 +192,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
|
|||
|
||||
// logical backup config
|
||||
result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
|
||||
result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0")
|
||||
result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1")
|
||||
result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
|
||||
result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
|
||||
result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
|
||||
|
|
|
|||
|
|
@ -161,7 +161,8 @@ func (c *Controller) acquireInitialListOfClusters() error {
|
|||
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
|
||||
if c.opConfig.EnableTeamIdClusternamePrefix {
|
||||
if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
|
||||
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid)
|
||||
pgSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusInvalid
|
||||
c.KubeClient.SetPostgresCRDStatus(clusterName, pgSpec)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
|
@ -470,13 +471,25 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
|
|||
|
||||
switch eventType {
|
||||
case EventAdd:
|
||||
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed)
|
||||
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
|
||||
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
|
||||
if err != nil {
|
||||
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
|
||||
}
|
||||
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError)
|
||||
case EventUpdate:
|
||||
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed)
|
||||
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
|
||||
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
|
||||
if err != nil {
|
||||
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
|
||||
}
|
||||
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError)
|
||||
default:
|
||||
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed)
|
||||
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
|
||||
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
|
||||
if err != nil {
|
||||
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
|
||||
}
|
||||
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError)
|
||||
}
|
||||
|
||||
|
|
@ -597,7 +610,7 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
|
|||
_, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{})
|
||||
if k8sutil.ResourceNotFound(err) {
|
||||
|
||||
c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))
|
||||
c.logger.Infof("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)
|
||||
|
||||
// get a separate copy of service account
|
||||
// to prevent a race condition when setting a namespace for many clusters
|
||||
|
|
|
|||
|
|
@ -2,14 +2,12 @@ package controller
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
|
|
@ -65,15 +63,9 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
|
|||
}
|
||||
if crd != nil {
|
||||
c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
|
||||
// copy annotations and labels from existing CRD since we do not define them
|
||||
desiredCrd.Annotations = crd.Annotations
|
||||
desiredCrd.Labels = crd.Labels
|
||||
patch, err := json.Marshal(desiredCrd)
|
||||
crd.Spec = desiredCrd.Spec
|
||||
_, err := c.KubeClient.CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not marshal new customResourceDefintion %q: %v", desiredCrd.Name, err)
|
||||
}
|
||||
if _, err := c.KubeClient.CustomResourceDefinitions().Patch(
|
||||
context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
|
||||
return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err)
|
||||
}
|
||||
}
|
||||
|
|
@ -103,7 +95,11 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
|
|||
}
|
||||
|
||||
func (c *Controller) createPostgresCRD() error {
|
||||
return c.createOperatorCRD(acidv1.PostgresCRD(c.opConfig.CRDCategories))
|
||||
crd, err := acidv1.PostgresCRD(c.opConfig.CRDCategories)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create Postgres CRD object: %v", err)
|
||||
}
|
||||
return c.createOperatorCRD(crd)
|
||||
}
|
||||
|
||||
func (c *Controller) createConfigurationCRD() error {
|
||||
|
|
@ -248,7 +244,7 @@ func (c *Controller) getInfrastructureRoles(
|
|||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return uniqRoles, fmt.Errorf(strings.Join(errors, `', '`))
|
||||
return uniqRoles, fmt.Errorf("%s", strings.Join(errors, `', '`))
|
||||
}
|
||||
|
||||
return uniqRoles, nil
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -25,8 +25,8 @@ SOFTWARE.
|
|||
package versioned
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
fmt "fmt"
|
||||
http "net/http"
|
||||
|
||||
acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
|
||||
zalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/zalando.org/v1"
|
||||
|
|
@ -41,8 +41,7 @@ type Interface interface {
|
|||
ZalandoV1() zalandov1.ZalandoV1Interface
|
||||
}
|
||||
|
||||
// Clientset contains the clients for groups. Each group has exactly one
|
||||
// version included in a Clientset.
|
||||
// Clientset contains the clients for groups.
|
||||
type Clientset struct {
|
||||
*discovery.DiscoveryClient
|
||||
acidV1 *acidv1.AcidV1Client
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -39,8 +39,12 @@ import (
|
|||
|
||||
// NewSimpleClientset returns a clientset that will respond with the provided objects.
|
||||
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
|
||||
// without applying any validations and/or defaults. It shouldn't be considered a replacement
|
||||
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
|
||||
// for a real clientset and is mostly useful in simple unit tests.
|
||||
//
|
||||
// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
|
||||
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
|
||||
// via --with-applyconfig).
|
||||
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
|
||||
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
|
||||
for _, obj := range objects {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -25,10 +25,10 @@ SOFTWARE.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
http "net/http"
|
||||
|
||||
v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||
acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
|
|
@ -101,10 +101,10 @@ func New(c rest.Interface) *AcidV1Client {
|
|||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1.SchemeGroupVersion
|
||||
gv := acidzalandov1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
|
||||
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2025 Compose, Zalando SE
|
||||
Copyright 2026 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue