Merge branch 'master' into gh-pages

commit 276e1134ff
@@ -9,7 +9,7 @@ assignees: ''

 Please, answer some short questions which should help us to understand your problem / question better?

-- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.10.1
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.11.0
 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
 - **Are you running Postgres Operator in production?** [yes | no]
 - **Type of issue?** [Bug report, question, feature request, etc.]
@@ -1,13 +1,15 @@
-name: Publish multiarch postgres-operator image on ghcr.io
+name: Publish multiarch postgres-operator images on ghcr.io

 env:
   REGISTRY: ghcr.io
   IMAGE_NAME: ${{ github.repository }}
+  IMAGE_NAME_UI: ${{ github.repository }}-ui

 on:
   push:
     tags:
       - '*'

 jobs:
   publish:
     name: Build, test and push image

@@ -21,7 +23,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.19.8"
+          go-version: "^1.21.7"

       - name: Run unit tests
         run: make deps mocks test

@@ -29,8 +31,20 @@ jobs:
       - name: Define image name
         id: image
         run: |
-          IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}"
-          echo "NAME=$IMAGE" >> $GITHUB_OUTPUT
+          OPERATOR_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}"
+          echo "OPERATOR_IMAGE=$OPERATOR_IMAGE" >> $GITHUB_OUTPUT
+
+      - name: Define UI image name
+        id: image_ui
+        run: |
+          UI_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME_UI }}:${GITHUB_REF/refs\/tags\//}"
+          echo "UI_IMAGE=$UI_IMAGE" >> $GITHUB_OUTPUT
+
+      - name: Define logical backup image name
+        id: image_lb
+        run: |
+          BACKUP_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/logical-backup:${GITHUB_REF_NAME}"
+          echo "BACKUP_IMAGE=$BACKUP_IMAGE" >> $GITHUB_OUTPUT

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2

@@ -45,12 +59,30 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

-      - name: Build and push multiarch image to ghcr
+      - name: Build and push multiarch operator image to ghcr
         uses: docker/build-push-action@v3
         with:
           context: .
           file: docker/Dockerfile
           push: true
           build-args: BASE_IMAGE=alpine:3.15
-          tags: "${{ steps.image.outputs.NAME }}"
+          tags: "${{ steps.image.outputs.OPERATOR_IMAGE }}"
+          platforms: linux/amd64,linux/arm64
+
+      - name: Build and push multiarch ui image to ghcr
+        uses: docker/build-push-action@v3
+        with:
+          context: ui
+          push: true
+          build-args: BASE_IMAGE=alpine:3.15
+          tags: "${{ steps.image_ui.outputs.UI_IMAGE }}"
+          platforms: linux/amd64,linux/arm64
+
+      - name: Build and push multiarch logical-backup image to ghcr
+        uses: docker/build-push-action@v3
+        with:
+          context: docker/logical-backup
+          push: true
+          build-args: BASE_IMAGE=ubuntu:22.04
+          tags: "${{ steps.image_lb.outputs.BACKUP_IMAGE }}"
           platforms: linux/amd64,linux/arm64
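For orientation, the tags produced by the three image-definition steps above expand roughly as follows for a hypothetical pushed tag `v1.11.0` (the repository slug `zalando/postgres-operator` is assumed here; actual values depend on `${{ github.repository }}` and the tag):

```yaml
# Hypothetical result of the "Define ... image name" steps for tag v1.11.0 (sketch, not workflow output)
operator_image: ghcr.io/zalando/postgres-operator:v1.11.0                       # ${REGISTRY}/${IMAGE_NAME}:<tag>
ui_image: ghcr.io/zalando/postgres-operator-ui:v1.11.0                          # ${REGISTRY}/${IMAGE_NAME_UI}:<tag>
logical_backup_image: ghcr.io/zalando/postgres-operator/logical-backup:v1.11.0  # ${REGISTRY}/${IMAGE_NAME}/logical-backup:${GITHUB_REF_NAME}
```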
@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v1
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.19.8"
+          go-version: "^1.21.7"
       - name: Make dependencies
         run: make deps mocks
       - name: Code generation
@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.19.8"
+          go-version: "^1.21.7"
       - name: Make dependencies
         run: make deps mocks
       - name: Compile

@@ -22,7 +22,7 @@ jobs:
       - name: Run unit tests
         run: go test -race -covermode atomic -coverprofile=coverage.out ./...
       - name: Convert coverage to lcov
-        uses: jandelgado/gcov2lcov-action@v1.0.8
+        uses: jandelgado/gcov2lcov-action@v1.0.9
       - name: Coveralls
         uses: coverallsapp/github-action@master
         with:
@@ -1,2 +1,2 @@
 # global owners
-* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet
+* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital
LICENSE

@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2023 Zalando SE
+Copyright (c) 2024 Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -4,3 +4,4 @@ Jan Mussler <jan.mussler@zalando.de>
 Jociele Padilha <jociele.padilha@zalando.de>
 Ida Novindasari <ida.novindasari@zalando.de>
 Polina Bungina <polina.bungina@zalando.de>
+Matthias Adler <matthias.adler@zalando.de>
Makefile

@@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
 	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .

 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.19.8 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.21.7 bash -c "make linux"

 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

@@ -78,7 +78,7 @@ mocks:
 	GO111MODULE=on go generate ./...

 tools:
-	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.25.9
+	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.28.7
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy
README.md

@@ -29,13 +29,13 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as

 ### PostgreSQL features

-* Supports PostgreSQL 15, starting from 10+
+* Supports PostgreSQL 16, starting from 11+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
-  [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /
+  [pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) /
   [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
 * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
-  [pg_stat_statements](https://www.postgresql.org/docs/15/pgstatstatements.html),
+  [pg_stat_statements](https://www.postgresql.org/docs/16/pgstatstatements.html),
   [pgextwlist](https://github.com/dimitri/pgextwlist),
   [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
 * Incl. popular Postgres extensions such as

@@ -45,6 +45,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
   [pg_partman](https://github.com/pgpartman/pg_partman),
   [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache),
   [pgq](https://github.com/pgq/pgq),
+  [pgvector](https://github.com/pgvector/pgvector),
   [plpgsql_check](https://github.com/okbob/plpgsql_check),
   [postgis](https://postgis.net/),
   [set_user](https://github.com/pgaudit/set_user) and

@@ -57,8 +58,9 @@ production for over five years.

 | Release | Postgres versions | K8s versions | Golang |
 | :-------- | :---------------: | :---------------: | :-----: |
-| v1.10.* | 10 → 15 | 1.21+ | 1.19.8 |
-| v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 |
+| v1.11.* | 11 → 16 | 1.21 → 1.28 | 1.21.7 |
+| v1.10.* | 10 → 15 | 1.21 → 1.28 | 1.19.8 |
+| v1.9.0 | 10 → 15 | 1.21 → 1.28 | 1.18.9 |
 | v1.8.* | 9.5 → 14 | 1.20 → 1.24 | 1.17.4 |
 | v1.7.1 | 9.5 → 14 | 1.20 → 1.24 | 1.16.9 |

@@ -70,7 +72,8 @@ For a quick first impression follow the instructions of this

 ## Supported setups of Postgres and Applications

 
+

 ## Documentation

@@ -86,9 +89,3 @@ There is a browser-friendly version of this documentation at
 * [Configuration options](docs/reference/operator_parameters.md)
 * [Postgres manifest reference](docs/reference/cluster_manifest.md)
 * [Command-line options and environment variables](docs/reference/command_line_and_environment.md)
-
-## Community
-
-There are two places to get in touch with the community:
-1. The [GitHub issue tracker](https://github.com/zalando/postgres-operator/issues)
-2. The **#postgres-operator** [slack channel](https://postgres-slack.herokuapp.com)
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.10.1
-appVersion: 1.10.1
+version: 1.11.0
+appVersion: 1.11.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,32 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v2
+    appVersion: 1.11.0
+    created: "2024-03-14T17:12:46.692800586+01:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.11.0.tgz
+    version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2023-09-07T16:27:29.490678409+02:00"
+    created: "2024-03-14T17:12:46.691746076+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce

@@ -26,7 +49,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.10.0
-    created: "2023-09-07T16:27:29.489712628+02:00"
+    created: "2024-03-14T17:12:46.690807634+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 47413650e3188539ae778a601998efa2c4f80b8aa16e3668a2fc7b72e014b605

@@ -49,7 +72,7 @@ entries:
     version: 1.10.0
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2023-09-07T16:27:29.506671133+02:00"
+    created: "2024-03-14T17:12:46.696626932+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc

@@ -72,7 +95,7 @@ entries:
     version: 1.9.0
   - apiVersion: v2
     appVersion: 1.8.2
-    created: "2023-09-07T16:27:29.505718885+02:00"
+    created: "2024-03-14T17:12:46.69565936+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: fbfc90fa8fd007a08a7c02e0ec9108bb8282cbb42b8c976d88f2193d6edff30c

@@ -95,7 +118,7 @@ entries:
     version: 1.8.2
   - apiVersion: v2
     appVersion: 1.8.1
-    created: "2023-09-07T16:27:29.504804961+02:00"
+    created: "2024-03-14T17:12:46.694691362+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: d26342e385ea51a0fbfbe23477999863e9489664ae803ea5c56da8897db84d24

@@ -118,7 +141,7 @@ entries:
     version: 1.8.1
   - apiVersion: v1
     appVersion: 1.8.0
-    created: "2023-09-07T16:27:29.503862231+02:00"
+    created: "2024-03-14T17:12:46.693750873+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: d4a7b40c23fd167841cc28342afdbd5ecc809181913a5c31061c83139187f148

@@ -139,50 +162,4 @@ entries:
     urls:
     - postgres-operator-ui-1.8.0.tgz
     version: 1.8.0
-  - apiVersion: v1
-    appVersion: 1.7.1
-    created: "2023-09-07T16:27:29.502938358+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 97aed1a1d37cd5f8441eea9522f38e56cc829786ad2134c437a5e6a15c995869
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.7.1.tgz
-    version: 1.7.1
-  - apiVersion: v1
-    appVersion: 1.7.0
-    created: "2023-09-07T16:27:29.494088829+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 37fba1968347daad393dbd1c6ee6e5b6a24d1095f972c0102197531c62dcada8
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.7.0.tgz
-    version: 1.7.0
-generated: "2023-09-07T16:27:29.488457568+02:00"
+generated: "2024-03-14T17:12:46.689654615+01:00"

Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -84,11 +84,11 @@ spec:
               "limit_iops": 16000,
               "limit_throughput": 1000,
               "postgresql_versions": [
+                "16",
                 "15",
                 "14",
                 "13",
-                "12",
-                "11"
+                "12"
               ]
             }
           {{- if .Values.extraEnvs }}
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.10.1
+  tag: v1.11.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.

@@ -45,6 +45,7 @@ envs:
   operatorApiUrl: "http://postgres-operator:8080"
   operatorClusterNameLabel: "cluster-name"
   resourcesVisible: "False"
+  # Set to "*" to allow viewing/creation of clusters in all namespaces
   targetNamespace: "default"
   teams:
   - "acid"
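A usage sketch of that option (the override file name is hypothetical, not part of the diff): to let the UI view and create clusters in every namespace, override the value when installing the chart.

```yaml
# my-ui-values.yaml — hypothetical override for the postgres-operator-ui chart
envs:
  # "*" lifts the single-namespace restriction; any concrete namespace keeps it
  targetNamespace: "*"
```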
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
-version: 1.10.1
-appVersion: 1.10.1
+version: 1.11.0
+appVersion: 1.11.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -68,7 +68,7 @@ spec:
                 type: string
               docker_image:
                 type: string
-                default: "ghcr.io/zalando/spilo-15:3.0-p1"
+                default: "ghcr.io/zalando/spilo-16:3.2-p2"
               enable_crd_registration:
                 type: boolean
                 default: true

@@ -167,10 +167,10 @@ spec:
                     type: string
                   minimal_major_version:
                     type: string
-                    default: "11"
+                    default: "12"
                   target_major_version:
                     type: string
-                    default: "15"
+                    default: "16"
               kubernetes:
                 type: object
                 properties:

@@ -205,9 +205,15 @@ spec:
                   enable_cross_namespace_secret:
                     type: boolean
                     default: false
+                  enable_finalizers:
+                    type: boolean
+                    default: false
                   enable_init_containers:
                     type: boolean
                     default: true
+                  enable_persistent_volume_claim_deletion:
+                    type: boolean
+                    default: true
                   enable_pod_antiaffinity:
                     type: boolean
                     default: false

@@ -278,6 +284,19 @@ spec:
                   pdb_name_format:
                     type: string
                     default: "postgres-{cluster}-pdb"
+                  persistent_volume_claim_retention_policy:
+                    type: object
+                    properties:
+                      when_deleted:
+                        type: string
+                        enum:
+                        - "delete"
+                        - "retain"
+                      when_scaled:
+                        type: string
+                        enum:
+                        - "delete"
+                        - "retain"
                   pod_antiaffinity_preferred_during_scheduling:
                     type: boolean
                     default: false

@@ -349,19 +368,15 @@ spec:
                   default_cpu_limit:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-                    default: "1"
                   default_cpu_request:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-                    default: "100m"
                   default_memory_limit:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-                    default: "500Mi"
                   default_memory_request:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-                    default: "100Mi"
                   max_cpu_request:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'

@@ -371,11 +386,9 @@ spec:
                   min_cpu_limit:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-                    default: "250m"
                   min_memory_limit:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-                    default: "250Mi"
               timeouts:
                 type: object
                 properties:

@@ -489,7 +502,7 @@ spec:
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                   logical_backup_docker_image:
                     type: string
-                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
                   logical_backup_google_application_credentials:
                     type: string
                   logical_backup_job_prefix:

@@ -526,6 +539,8 @@ spec:
                     type: string
                     pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
                     default: "30 00 * * *"
+                  logical_backup_cronjob_environment_secret:
+                    type: string
               debug:
                 type: object
                 properties:

@@ -637,7 +652,7 @@ spec:
                     default: "pooler"
                   connection_pooler_image:
                     type: string
-                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
+                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
                   connection_pooler_max_db_connections:
                     type: integer
                     default: 60

@@ -654,19 +669,15 @@ spec:
                   connection_pooler_default_cpu_limit:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-                    default: "1"
                   connection_pooler_default_cpu_request:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-                    default: "500m"
                   connection_pooler_default_memory_limit:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-                    default: "100Mi"
                   connection_pooler_default_memory_request:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-                    default: "100Mi"
               patroni:
                 type: object
                 properties:
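To make the new kubernetes-level switches concrete, a fragment of an OperatorConfiguration resource using the fields added above might look like this sketch (the resource name and chosen values are illustrative, not part of the diff):

```yaml
# Hypothetical OperatorConfiguration fragment; only the newly added fields are shown.
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration   # assumed name
configuration:
  kubernetes:
    enable_finalizers: false                        # new toggle, CRD default false
    enable_persistent_volume_claim_deletion: true   # new toggle, CRD default true
    persistent_volume_claim_retention_policy:       # new object with when_deleted / when_scaled
      when_deleted: retain
      when_scaled: retain
```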
@@ -371,12 +371,12 @@ spec:
             version:
               type: string
               enum:
-              - "10"
               - "11"
               - "12"
               - "13"
               - "14"
               - "15"
+              - "16"
             parameters:
               type: object
               additionalProperties:

@@ -441,6 +441,12 @@ spec:
                   pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   # Note: the value specified here must not be zero or be higher
                   # than the corresponding limit.
+                hugepages-2Mi:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+                hugepages-1Gi:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
             requests:
               type: object
               properties:

@@ -450,6 +456,12 @@ spec:
                 memory:
                   type: string
                   pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+                hugepages-2Mi:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+                hugepages-1Gi:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
             schedulerName:
               type: string
             serviceAnnotations:

@@ -501,6 +513,8 @@ spec:
               type: integer
             database:
               type: string
+            enableRecovery:
+              type: boolean
             filter:
               type: object
               additionalProperties:

@@ -518,6 +532,8 @@ spec:
               type: string
             payloadColumn:
               type: string
+            recoveryEventType:
+              type: string
             teamId:
               type: string
             tls:

@@ -596,6 +612,11 @@ spec:
               - SUPERUSER
               - nosuperuser
               - NOSUPERUSER
+        usersIgnoringSecretRotation:
+          type: array
+          nullable: true
+          items:
+            type: string
         usersWithInPlaceSecretRotation:
           type: array
           nullable: true
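As an illustration of the manifest-level additions (cluster, team, and user names here are invented for the example), a `postgresql` resource could now pin Postgres 16 and exempt a user from secret rotation:

```yaml
# Hypothetical minimal cluster manifest exercising the new CRD fields above.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster        # assumed name
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "16"                   # "16" is now accepted; "10" was dropped from the enum
  users:
    bar_user: []                    # example user
  usersIgnoringSecretRotation:
  - bar_user                        # secret stays synced but is excluded from rotation
```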
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v2
+    appVersion: 1.11.0
+    created: "2024-03-14T17:11:54.311938906+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: f12f5ae9282dd77d37e3bfd0aa47be58ed0b2f02056889d8f1111bdb2b9fe286
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.11.0.tgz
+    version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2023-09-07T16:26:25.96185313+02:00"
+    created: "2024-03-14T17:11:54.3101439+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c

@@ -25,7 +47,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.10.0
-    created: "2023-09-07T16:26:25.960303202+02:00"
+    created: "2024-03-14T17:11:54.308561116+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 60fc5c8059dfed175d14e1034b40997d9c59d33ec8ea158c0597f7228ab04b51

@@ -47,7 +69,7 @@ entries:
     version: 1.10.0
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2023-09-07T16:26:25.971662154+02:00"
+    created: "2024-03-14T17:11:54.3194627+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276

@@ -69,7 +91,7 @@ entries:
     version: 1.9.0
   - apiVersion: v2
     appVersion: 1.8.2
-    created: "2023-09-07T16:26:25.97011158+02:00"
+    created: "2024-03-14T17:11:54.317846817+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: f77ffad2e98b72a621e5527015cf607935d3ed688f10ba4b626435acb9631b5b

@@ -91,7 +113,7 @@ entries:
     version: 1.8.2
   - apiVersion: v2
     appVersion: 1.8.1
-    created: "2023-09-07T16:26:25.968682347+02:00"
+    created: "2024-03-14T17:11:54.315242584+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: ee0c3bb6ba72fa4289ba3b1c6060e5b312dd023faba2a61b4cb7d9e5e2cc57a5

@@ -113,7 +135,7 @@ entries:
     version: 1.8.1
   - apiVersion: v1
     appVersion: 1.8.0
-    created: "2023-09-07T16:26:25.967242444+02:00"
+    created: "2024-03-14T17:11:54.313632778+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3ae232cf009e09aa2ad11c171484cd2f1b72e63c59735e58fbe2b6eb842f4c86

@@ -133,48 +155,4 @@ entries:
     urls:
     - postgres-operator-1.8.0.tgz
     version: 1.8.0
-  - apiVersion: v1
-    appVersion: 1.7.1
-    created: "2023-09-07T16:26:25.965786379+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: 7262563bec0b058e669ae6bcff0226e33fa9ece9c41ac46a53274046afe7700c
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.7.1.tgz
-    version: 1.7.1
-  - apiVersion: v1
-    appVersion: 1.7.0
-    created: "2023-09-07T16:26:25.963469658+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: c3e99fb94305f81484b8b1af18eefb78681f3b5d057d5ad10565e4afb7c65ffe
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.7.0.tgz
-    version: 1.7.0
-generated: "2023-09-07T16:26:25.958442963+02:00"
+generated: "2024-03-14T17:11:54.305930529+01:00"

Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -38,6 +38,13 @@ Create a pod service account name.
 {{ default (printf "%s-%v" (include "postgres-operator.fullname" .) "pod") .Values.podServiceAccount.name }}
 {{- end -}}

+{{/*
+Create a pod priority class name.
+*/}}
+{{- define "postgres-pod.priorityClassName" -}}
+{{ default (printf "%s-%v" (include "postgres-operator.fullname" .) "pod") .Values.podPriorityClassName.name }}
+{{- end -}}
+
 {{/*
 Create a controller ID.
 */}}
@@ -10,9 +10,9 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
 data:
-  {{- if .Values.podPriorityClassName }}
-  pod_priority_class_name: {{ .Values.podPriorityClassName }}
+  {{- if or .Values.podPriorityClassName.create .Values.podPriorityClassName.name }}
+  pod_priority_class_name: {{ include "postgres-pod.priorityClassName" . }}
   {{- end }}
   pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
   {{- include "flattenValuesForConfigMap" .Values.configGeneral | indent 2 }}
   {{- include "flattenValuesForConfigMap" .Values.configUsers | indent 2 }}
@@ -16,8 +16,8 @@ configuration:
   major_version_upgrade:
 {{ toYaml .Values.configMajorVersionUpgrade | indent 4 }}
   kubernetes:
-    {{- if .Values.podPriorityClassName }}
-    pod_priority_class_name: {{ .Values.podPriorityClassName }}
+    {{- if .Values.podPriorityClassName.name }}
+    pod_priority_class_name: {{ .Values.podPriorityClassName.name }}
     {{- end }}
     pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
@@ -1,4 +1,4 @@
-{{- if .Values.podPriorityClassName }}
+{{- if .Values.podPriorityClassName.create }}
 apiVersion: scheduling.k8s.io/v1
 description: 'Use only for databases controlled by Postgres operator'
 kind: PriorityClass

@@ -8,9 +8,9 @@ metadata:
     helm.sh/chart: {{ template "postgres-operator.chart" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
-  name: {{ .Values.podPriorityClassName }}
+  name: {{ include "postgres-pod.priorityClassName" . }}
   namespace: {{ .Release.Namespace }}
 preemptionPolicy: PreemptLowerPriority
 globalDefault: false
-value: 1000000
+value: {{ .Values.podPriorityClassName.priority }}
 {{- end }}
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.10.1
+  tag: v1.11.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.

@@ -38,7 +38,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: ghcr.io/zalando/spilo-15:3.0-p1
+  docker_image: ghcr.io/zalando/spilo-16:3.2-p2

   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""

@@ -89,9 +89,9 @@ configMajorVersionUpgrade:
   # - acid

   # minimal Postgres major version that will not automatically be upgraded
-  minimal_major_version: "11"
+  minimal_major_version: "12"
   # target Postgres major version when upgrading clusters automatically
-  target_major_version: "15"
+  target_major_version: "16"

 configKubernetes:
   # list of additional capabilities for postgres container

@@ -123,8 +123,14 @@ configKubernetes:

   # allow user secrets in other namespaces than the Postgres cluster
   enable_cross_namespace_secret: false
+  # use finalizers to ensure all managed resources are deleted prior to the postgresql CR
+  # this avoids stale resources in case the operator misses a delete event or is not running
+  # during deletion
+  enable_finalizers: false
   # enables initContainers to run actions before Spilo is started
   enable_init_containers: true
+  # toggles if operator should delete PVCs on cluster deletion
+  enable_persistent_volume_claim_deletion: true
   # toggles pod anti affinity on the Postgres pods
   enable_pod_antiaffinity: false
   # toggles PDB to set to MinAvailabe 0 or 1

@@ -165,6 +171,10 @@ configKubernetes:

   # defines the template for PDB (Pod Disruption Budget) names
   pdb_name_format: "postgres-{cluster}-pdb"
+  # specify the PVC retention policy when scaling down and/or deleting
+  persistent_volume_claim_retention_policy:
+    when_deleted: "retain"
+    when_scaled: "retain"
   # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
   pod_antiaffinity_preferred_during_scheduling: false
   # override topology key for pod anti affinity

@@ -348,7 +358,7 @@ configLogicalBackup:
   # logical_backup_memory_request: ""

   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""

@@ -372,6 +382,8 @@ configLogicalBackup:
   logical_backup_s3_retention_time: ""
   # backup schedule in the cron format
   logical_backup_schedule: "30 00 * * *"
+  # secret to be used as reference for env variables in cronjob
+  logical_backup_cronjob_environment_secret: ""

 # automate creation of human users with teams API service
 configTeamsApi:

@@ -416,7 +428,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode

@@ -458,7 +470,14 @@ podServiceAccount:
 priorityClassName: ""

 # priority class for database pods
-podPriorityClassName: ""
+podPriorityClassName:
+  # If create is false with no name set, no podPriorityClassName is specified.
+  # Hence, the pod priorityClass is the one with globalDefault set.
+  # If there is no PriorityClass with globalDefault set, the priority of Pods with no priorityClassName is zero.
+  create: true
+  # If not set a name is generated using the fullname template and "-pod" suffix
+  name: ""
+  priority: 1000000

 resources:
   limits:
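A short sketch of how the restructured `podPriorityClassName` value can be overridden when installing the chart (the file name and numbers are hypothetical):

```yaml
# my-operator-values.yaml — hypothetical Helm override for the postgres-operator chart
podPriorityClassName:
  create: true               # render the PriorityClass template guarded above
  name: "postgres-db-pods"   # optional; empty means "<fullname>-pod" is generated
  priority: 500000           # becomes the PriorityClass `value`
```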
@@ -2,7 +2,9 @@ version: "2017-09-20"
 pipeline:
 - id: build-postgres-operator
   type: script
-  vm: large
+  vm_config:
+    type: linux
+    size: large
   cache:
     paths:
     - /go/pkg/mod

@@ -16,7 +18,7 @@ pipeline:
   - desc: 'Install go'
     cmd: |
       cd /tmp
-      wget -q https://storage.googleapis.com/golang/go1.19.8.linux-amd64.tar.gz -O go.tar.gz
+      wget -q https://storage.googleapis.com/golang/go1.21.7.linux-amd64.tar.gz -O go.tar.gz
       tar -xf go.tar.gz
       mv go /usr/local
       ln -s /usr/local/go/bin/go /usr/bin/go

@@ -37,9 +39,6 @@ pipeline:
     cmd: |
       export PATH=$PATH:$HOME/go/bin
       go test ./...
-  - desc: 'Run e2e tests'
-    cmd: |
-      make e2e
   - desc: 'Push docker image'
     cmd: |
       export PATH=$PATH:$HOME/go/bin

@@ -55,6 +54,8 @@ pipeline:
 - id: build-operator-ui
   type: script
+  vm_config:
+    type: linux

   commands:
   - desc: 'Prepare environment'

@@ -83,6 +84,8 @@ pipeline:
 - id: build-logical-backup
   type: script
+  vm_config:
+    type: linux

   commands:
   - desc: Build image
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3.15:latest
+ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
 ARG VERSION=latest

 FROM ubuntu:20.04 as builder

@@ -13,6 +13,7 @@ RUN bash docker/build_operator.sh

 FROM ${BASE_IMAGE}
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
+LABEL org.opencontainers.image.source="https://github.com/zalando/postgres-operator"

 # We need root certificates to deal with teams api over https
 RUN apk --no-cache add curl
@@ -13,7 +13,7 @@ apt-get install -y wget

 (
   cd /tmp
-  wget -q "https://storage.googleapis.com/golang/go1.19.8.linux-${arch}.tar.gz" -O go.tar.gz
+  wget -q "https://storage.googleapis.com/golang/go1.21.7.linux-${arch}.tar.gz" -O go.tar.gz
   tar -xf go.tar.gz
   mv go /usr/local
   ln -s /usr/local/go/bin/go /usr/bin/go
@@ -1,4 +1,5 @@
-FROM registry.opensource.zalan.do/library/ubuntu-18.04:latest
+ARG BASE_IMAGE=registry.opensource.zalan.do/library/ubuntu-22.04:latest
+FROM ${BASE_IMAGE}
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 SHELL ["/bin/bash", "-o", "pipefail", "-c"]

@@ -24,12 +25,11 @@ RUN apt-get update \
     && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
     && apt-get update \
     && apt-get install --no-install-recommends -y \
+        postgresql-client-16 \
         postgresql-client-15 \
         postgresql-client-14 \
         postgresql-client-13 \
         postgresql-client-12 \
-        postgresql-client-11 \
-        postgresql-client-10 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
@ -355,6 +355,23 @@ This would be the recommended option to enable rotation in secrets of database
|
||||||
owners, but only if they are not used as application users for regular read
|
owners, but only if they are not used as application users for regular read
|
||||||
and write operations.
|
and write operations.
|
||||||
|
|
||||||
|
### Ignore rotation for certain users
|
||||||
|
|
||||||
|
If you wish to globally enable password rotation but need certain users to
|
||||||
|
opt out from it there are two ways. First, you can remove the user from the
|
||||||
|
manifest's `users` section. The corresponding secret to this user will no
|
||||||
|
longer be synced by the operator then.
|
||||||
|
|
||||||
|
Secondly, if you want the operator to continue syncing the secret (e.g. to
|
||||||
|
recreate if it got accidentally removed) but cannot allow it being rotated,
|
||||||
|
add the user to the following list in your manifest:
|
||||||
|
|
||||||
|
```
|
||||||
|
spec:
|
||||||
|
usersIgnoringSecretRotation:
|
||||||
|
- bar_user
|
||||||
|
```
|
||||||
|
|
||||||
### Turning off password rotation
|
### Turning off password rotation
|
||||||
|
|
||||||
When password rotation is turned off again the operator will check if the
|
When password rotation is turned off again the operator will check if the
|
||||||
|
|
@ -1200,7 +1217,7 @@ aws_or_gcp:
|
||||||
|
|
||||||
If cluster members have to be (re)initialized restoring physical backups
|
If cluster members have to be (re)initialized restoring physical backups
|
||||||
happens automatically either from the backup location or by running
|
happens automatically either from the backup location or by running
|
||||||
[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html)
|
[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html)
|
||||||
on one of the other running instances (preferably replicas if they do not lag
|
on one of the other running instances (preferably replicas if they do not lag
|
||||||
behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
|
behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
|
||||||
clusters.
|
clusters.
|
||||||
|
|
@@ -1348,6 +1365,8 @@ You can also expose the operator API through a [service](https://github.com/zala
 Some displayed options can be disabled from the UI using simple flags under the
 `OPERATOR_UI_CONFIG` field in the deployment.
 
+The viewing and creation of clusters within the UI is limited to the namespace specified by the `TARGET_NAMESPACE` option. To allow the creation and viewing of clusters in all namespaces, set `TARGET_NAMESPACE` to `*`.
+
 ### Deploy the UI on K8s
 
 Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres
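For illustration only, the relevant part of the UI deployment's container spec might then look like the sketch below; the exact env var layout should be taken from `ui/manifests/deployment.yaml`, and display flags would go into the `OPERATOR_UI_CONFIG` JSON in the same env list:

```yaml
env:
- name: "TARGET_NAMESPACE"
  value: "*"    # or a single namespace name to restrict the UI
```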
Two new binary image files (screenshots, not shown): 922 KiB and 887 KiB.
docs/reference/cluster_manifest.md

@@ -142,6 +142,14 @@ These parameters are grouped directly under the `spec` key in the manifest.
   database, like a flyway user running a migration on Pod start. See more
   details in the [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#password-replacement-without-extra-users).
 
+* **usersIgnoringSecretRotation**
+  if you have secret rotation enabled globally you can define a list of
+  users that should opt out from it, for example if you store credentials
+  outside of K8s, too, and corresponding deployments cannot dynamically
+  reference secrets. Note that you can also opt out of rotation by removing
+  users from the manifest's `users` section. The operator will not drop them
+  from the database. Optional.
+
 * **databases**
   a map of database names to database owners for the databases that should be
   created by the operator. The owner users should already exist on the cluster
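To make the new parameter concrete, here is a hedged, minimal manifest sketch combining it with the neighbouring `users` and `databases` keys; the user and database names are invented for illustration:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  users:
    flyway_user: []          # credentials also stored outside of K8s
    app_user: []
  usersIgnoringSecretRotation:
  - flyway_user              # secret stays synced, password is not rotated
  databases:
    foo: app_user
  postgresql:
    version: "16"
```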
@@ -359,6 +367,14 @@ CPU and memory requests for the Postgres container.
   memory requests for the Postgres container. Optional, overrides the
   `default_memory_request` operator configuration parameter.
 
+* **hugepages-2Mi**
+  hugepages-2Mi requests for the Postgres container.
+  Optional, defaults to not set.
+
+* **hugepages-1Gi**
+  hugepages-1Gi requests for the Postgres container.
+  Optional, defaults to not set.
+
 ### Limits
 
 CPU and memory limits for the Postgres container.
@@ -371,6 +387,14 @@ CPU and memory limits for the Postgres container.
   memory limits for the Postgres container. Optional, overrides the
   `default_memory_limit` operator configuration parameter.
 
+* **hugepages-2Mi**
+  hugepages-2Mi limits for the Postgres container.
+  Optional, defaults to not set.
+
+* **hugepages-1Gi**
+  hugepages-1Gi limits for the Postgres container.
+  Optional, defaults to not set.
+
 ## Parameters defining how to clone the cluster from another one
 
 Those parameters are applied when the cluster should be a clone of another one
@@ -500,6 +524,14 @@ CPU and memory requests for the sidecar container.
   memory requests for the sidecar container. Optional, overrides the
   `default_memory_request` operator configuration parameter.
 
+* **hugepages-2Mi**
+  hugepages-2Mi requests for the sidecar container.
+  Optional, defaults to not set.
+
+* **hugepages-1Gi**
+  hugepages-1Gi requests for the sidecar container.
+  Optional, defaults to not set.
+
 ### Limits
 
 CPU and memory limits for the sidecar container.
@@ -512,6 +544,14 @@ CPU and memory limits for the sidecar container.
   memory limits for the sidecar container. Optional, overrides the
   `default_memory_limit` operator configuration parameter.
 
+* **hugepages-2Mi**
+  hugepages-2Mi limits for the sidecar container.
+  Optional, defaults to not set.
+
+* **hugepages-1Gi**
+  hugepages-1Gi limits for the sidecar container.
+  Optional, defaults to not set.
+
 ## Connection pooler
 
 Parameters are grouped under the `connectionPooler` top-level key and specify
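A hedged sketch of how these keys sit under a sidecar's `resources`; the sidecar name and image are invented for illustration:

```yaml
spec:
  sidecars:
  - name: exporter                       # hypothetical sidecar
    image: example.org/exporter:latest   # placeholder image
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
        hugepages-2Mi: 128Mi
      limits:
        cpu: 500m
        memory: 500Mi
        hugepages-2Mi: 128Mi
```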
@@ -581,7 +621,7 @@ the global configuration before adding the `tls` section'.
 ## Change data capture streams
 
 This section enables change data capture (CDC) streams via Postgres'
-[logical decoding](https://www.postgresql.org/docs/15/logicaldecoding.html)
+[logical decoding](https://www.postgresql.org/docs/16/logicaldecoding.html)
 feature and `pgoutput` plugin. While the Postgres operator takes responsibility
 for providing the setup to publish change events, it relies on external tools
 to consume them. At Zalando, we are using a workflow based on
@@ -613,7 +653,7 @@ can have the following properties:
   and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
   The application is responsible for putting events into a (JSON/B or VARCHAR)
   payload column of the outbox table in the structure of the specified target
-  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/15/logical-replication-publication.html)
+  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/16/logical-replication-publication.html)
   in Postgres for all tables specified for one `database` and `applicationId`.
   The CDC operator will consume from it shortly after transactions are
   committed to the outbox table. The `idColumn` will be used in telemetry for
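For orientation, a `streams` section following the properties described here might look roughly like the sketch below; the database, table and application names are invented, and the exact schema should be taken from the full manifest reference rather than from this example:

```yaml
spec:
  streams:
  - applicationId: my-cdc-consumer       # placeholder id of the consuming application
    database: foo
    tables:
      data.outbox_table:
        eventType: order-created         # target event type of the payload
        idColumn: id                     # used for telemetry
        payloadColumn: payload           # JSON/B column holding the event
    batchSize: 100
```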
docs/reference/operator_parameters.md

@@ -3,33 +3,46 @@
 There are two mutually-exclusive methods to set the Postgres Operator
 configuration.
 
-* ConfigMaps-based, the legacy one. The configuration is supplied in a
-  key-value configmap, defined by the `CONFIG_MAP_NAME` environment variable.
-  Non-scalar values, i.e. lists or maps, are encoded in the value strings using
-  the comma-based syntax for lists and comma-separated `key:value` syntax for
-  maps. String values containing ':' should be enclosed in quotes. The
-  configuration is flat, parameter group names below are not reflected in the
-  configuration structure. There is an
-  [example](https://github.com/zalando/postgres-operator/blob/master/manifests/configmap.yaml).
-
-* CRD-based configuration. The configuration is stored in a custom YAML
-  manifest. The manifest is an instance of the custom resource definition (CRD)
-  called `OperatorConfiguration`. The operator registers this CRD during the
-  start and uses it for configuration if the [operator deployment manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36)
-  sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty
-  value. The variable should point to the `postgresql-operator-configuration`
-  object in the operator's namespace.
-
-The CRD-based configuration is a regular YAML document; non-scalar keys are
-simply represented in the usual YAML way. There are no default values built
-into the operator; each parameter that is not supplied in the configuration
-receives an empty value. In order to create your own configuration just copy
-the [default one](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
-and change it.
-
-To test the CRD-based configuration locally, use the following
-
-```bash
+* ConfigMaps-based, the legacy one
+* CRD-based configuration
+
+Variable names are underscore-separated words.
+
+### ConfigMaps-based
+
+The configuration is supplied in a key-value configmap, defined by the
+`CONFIG_MAP_NAME` environment variable. Non-scalar values, i.e. lists or maps,
+are encoded in the value strings using the comma-based syntax for lists and
+comma-separated `key:value` syntax for maps. String values containing ':'
+should be enclosed in quotes. The configuration is flat, parameter group names
+below are not reflected in the configuration structure. There is an
+[example](https://github.com/zalando/postgres-operator/blob/master/manifests/configmap.yaml).
+
+For the configmap configuration, the [default parameter values](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go#L14)
+mentioned here are likely to be overwritten in your local operator installation
+via your local version of the operator configmap. In case you use the
+operator CRD, all the CRD defaults are provided in the
+[operator's default configuration manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml).
+
+### CRD-based configuration
+
+The configuration is stored in a custom YAML manifest. The manifest is an
+instance of the custom resource definition (CRD) called `OperatorConfiguration`.
+The operator registers this CRD during start and uses it for configuration if
+the [operator deployment manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36)
+sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty
+value. The variable should point to the `postgresql-operator-configuration`
+object in the operator's namespace.
+
+The CRD-based configuration is a regular YAML document; non-scalar keys are
+simply represented in the usual YAML way. There are no default values built
+into the operator; each parameter that is not supplied in the configuration
+receives an empty value. To create your own configuration, just copy
+the [default one](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
+and change it.
+
+To test the CRD-based configuration locally, use the following
+
+```bash
 kubectl create -f manifests/operatorconfiguration.crd.yaml # registers the CRD
 kubectl create -f manifests/postgresql-operator-default-configuration.yaml
@@ -37,7 +50,7 @@ configuration.
 kubectl create -f manifests/postgres-operator.yaml # set the env var as mentioned above
 
 kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
 ```
 
 The CRD-based configuration is more powerful than the one based on ConfigMaps
 and should be used unless there is a compatibility requirement to use an already
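To illustrate the flat key-value encoding described above, a hypothetical excerpt of such a configmap could look like this; the keys are real operator options, but the values are examples only:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  workers: "8"                               # scalar value
  protected_role_names: "admin,cron_admin"   # comma-separated list
  cluster_labels: "application:spilo"        # comma-separated key:value map
```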
@@ -58,15 +71,6 @@ parameters, those parameters have no effect and are replaced by the
 `CRD_READY_WAIT_INTERVAL` and `CRD_READY_WAIT_TIMEOUT` environment variables.
 They will be deprecated and removed in the future.
 
-For the configmap configuration, the [default parameter values](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go#L14)
-mentioned here are likely to be overwritten in your local operator installation
-via your local version of the operator configmap. In case you use the
-operator CRD, all the CRD defaults are provided in the
-[operator's default configuration manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
-
-Variable names are underscore-separated words.
-
-
 ## General
 
 Those are top-level keys, containing both leaf keys and groups.
@@ -246,12 +250,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.
 
 * **minimal_major_version**
   The minimal Postgres major version that will not automatically be upgraded
-  when `major_version_upgrade_mode` is set to `"full"`. The default is `"11"`.
+  when `major_version_upgrade_mode` is set to `"full"`. The default is `"12"`.
 
 * **target_major_version**
   The target Postgres major version when upgrading clusters automatically
   which violate the configured allowed `minimal_major_version` when
-  `major_version_upgrade_mode` is set to `"full"`. The default is `"15"`.
+  `major_version_upgrade_mode` is set to `"full"`. The default is `"16"`.
 
 ## Kubernetes resources
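As a small, hedged example of how this group could be set in the ConfigMap-based configuration (the values mirror the defaults quoted above; adjust them to your environment):

```yaml
data:
  major_version_upgrade_mode: "full"   # "off" disables automatic upgrades
  minimal_major_version: "12"
  target_major_version: "16"
```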
@@ -323,6 +327,45 @@ configuration they are grouped under the `kubernetes` key.
   replaced by the cluster name. Only the `{cluster}` placeholder is allowed in
   the template.
 
+* **pdb_master_label_selector**
+  By default the PDB will match the master role, hence preventing nodes from
+  being drained if the node_readiness_label is not used. If this option is set
+  to `false` the `spilo-role=master` selector will not be added to the PDB.
+
+* **enable_finalizers**
+  By default, a deletion of the Postgresql resource will trigger an event
+  that leads to a cleanup of all child resources. However, if the database
+  cluster is in a broken state (e.g. failed initialization) and the operator
+  cannot fully sync it, there can be leftovers. By enabling finalizers the
+  operator will ensure all managed resources are deleted prior to the
+  Postgresql resource. There is a trade-off though: the deletion is only
+  performed after the next two SYNC cycles, with the first one updating the
+  internal spec and the latter reacting on the `deletionTimestamp` while
+  processing the SYNC event. The final removal of the custom resource will
+  add a DELETE event to the worker queue, but the child resources are already
+  gone at this point.
+  The default is `false`.
+
+* **persistent_volume_claim_retention_policy**
+  The operator tries to protect volumes as much as possible. If somebody
+  accidentally deletes the statefulset or scales in the `numberOfInstances`, the
+  Persistent Volume Claims and thus Persistent Volumes will be retained.
+  However, this can have some consequences when you scale out again at a much
+  later point, for example after the cluster's Postgres major version has been
+  upgraded, because the old volume runs the old Postgres version with stale data.
+  Even if the version has not changed, the replication lag could be massive. In
+  this case a reinitialization of the re-added member would make sense. You can
+  also modify the [retention policy of PVCs](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) in the operator configuration.
+  The behavior can be changed for two scenarios: `when_deleted` - default is
+  `"retain"` - or `when_scaled` - default is also `"retain"`. The other possible
+  option is `delete`.
+
+* **enable_persistent_volume_claim_deletion**
+  By default, the operator deletes PersistentVolumeClaims when removing the
+  Postgres cluster manifest, no matter if `persistent_volume_claim_retention_policy`
+  on the statefulset is set to `retain`. To keep PVCs set this option to `false`.
+  The default is `true`.
+
 * **enable_pod_disruption_budget**
   PDB is enabled by default to protect the cluster from voluntary disruptions
   and hence unwanted DB downtime. However, on some cloud providers it could be
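A hedged sketch of how the new volume-related options might be set in the ConfigMap-based configuration (the CRD-based configuration takes the same keys under its `kubernetes` section); the values shown are illustrative:

```yaml
data:
  pdb_master_label_selector: "true"
  enable_finalizers: "false"
  persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:delete"
  enable_persistent_volume_claim_deletion: "true"
```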
@@ -527,19 +570,19 @@ CRD-based configuration.
 
 * **default_cpu_request**
   CPU request value for the Postgres containers, unless overridden by
-  cluster-specific settings. The default is `100m`.
+  cluster-specific settings. Empty string or `0` disables the default.
 
 * **default_memory_request**
   memory request value for the Postgres containers, unless overridden by
-  cluster-specific settings. The default is `100Mi`.
+  cluster-specific settings. Empty string or `0` disables the default.
 
 * **default_cpu_limit**
   CPU limits for the Postgres containers, unless overridden by cluster-specific
-  settings. The default is `1`.
+  settings. Empty string or `0` disables the default.
 
 * **default_memory_limit**
   memory limits for the Postgres containers, unless overridden by cluster-specific
-  settings. The default is `500Mi`.
+  settings. Empty string or `0` disables the default.
 
 * **max_cpu_request**
   optional upper boundary for CPU request
@@ -549,11 +592,11 @@ CRD-based configuration.
 
 * **min_cpu_limit**
   hard CPU minimum we consider to be required to properly run Postgres
-  clusters with Patroni on Kubernetes. The default is `250m`.
+  clusters with Patroni on Kubernetes.
 
 * **min_memory_limit**
   hard memory minimum we consider to be required to properly run Postgres
-  clusters with Patroni on Kubernetes. The default is `250Mi`.
+  clusters with Patroni on Kubernetes.
 
 ## Patroni options
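Illustrative ConfigMap values for the request/limit defaults discussed here; as noted above, an empty string or `0` switches a default off:

```yaml
data:
  default_cpu_request: "100m"
  default_memory_request: "100Mi"
  default_cpu_limit: ""       # empty string: no default CPU limit is applied
  default_memory_limit: "0"   # 0 disables the default as well
  min_cpu_limit: "250m"
  min_memory_limit: "250Mi"
```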
@@ -774,7 +817,7 @@ grouped under the `logical_backup` key.
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
 
 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.
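A hedged example of how the logical backup options discussed in this group might be combined in the ConfigMap-based configuration; the bucket and secret names are placeholders:

```yaml
data:
  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
  logical_backup_schedule: "30 00 * * *"
  logical_backup_s3_bucket: "my-backup-bucket"                      # placeholder
  logical_backup_cronjob_environment_secret: "logical-backup-env"   # placeholder secret name
```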
@@ -825,6 +868,9 @@ grouped under the `logical_backup` key.
   [reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
   into account. Default: "30 00 \* \* \*"
 
+* **logical_backup_cronjob_environment_secret**
+  Reference to a Kubernetes secret whose keys will be added as environment variables to the cron job. Default: ""
+
 ## Debugging the operator
 
 Options to aid debugging of the operator itself. Grouped under the `debug` key.
@@ -1001,5 +1047,4 @@ operator being able to provide some reasonable defaults.
 **connection_pooler_default_memory_request**
 **connection_pooler_default_cpu_limit**
 **connection_pooler_default_memory_limit**
-Default resource configuration for connection pooler deployment. The internal
-default for memory request and limit is `100Mi`, for CPU it is `500m` and `1`.
+Default resource configuration for connection pooler deployment.
78 docs/user.md
@@ -30,7 +30,7 @@ spec:
   databases:
     foo: zalando
   postgresql:
-    version: "15"
+    version: "16"
 ```
 
 Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@@ -109,7 +109,7 @@ metadata:
 spec:
   [...]
   postgresql:
-    version: "15"
+    version: "16"
     parameters:
       password_encryption: scram-sha-256
 ```
@@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:
 
 The `<dbname>_owner` role is the database owner and should be used when creating
 new database objects. All members of the `admin` role, e.g. teams API roles, can
-become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/15/sql-alterdefaultprivileges.html)
+become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/16/sql-alterdefaultprivileges.html)
 are configured for the owner role so that the `<dbname>_reader` role
 automatically gets read-access (SELECT) to new tables and sequences and the
 `<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
@@ -580,7 +580,9 @@ For all LOGIN roles the operator will create K8s secrets in the namespace
 specified in `secretNamespace`, if `enable_cross_namespace_secret` is set to
 `true` in the config. Otherwise, they are created in the same namespace as
 the Postgres cluster. Unlike roles specified with `namespace.username` under
-`users`, the namespace will not be part of the role name here.
+`users`, the namespace will not be part of the role name here. Keep in mind
+that the underscores in a role name are replaced with dashes in the K8s
+secret name.
 
 ```yaml
 spec:

@@ -592,7 +594,7 @@ spec:
 
 ### Schema `search_path` for default roles
 
-The schema [`search_path`](https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PATH)
+The schema [`search_path`](https://www.postgresql.org/docs/16/ddl-schemas.html#DDL-SCHEMAS-PATH)
 for each role will include the role name and the schemas this role should have
 access to. So `foo_bar_writer` does not have to schema-qualify tables from
 schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and
@@ -687,6 +689,30 @@ The minimum limits to properly run the `postgresql` resource are configured to
 manifest the operator will raise the limits to the configured minimum values.
 If no resources are defined in the manifest they will be obtained from the
 configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests).
+If neither defaults nor minimum limits are configured the operator will not
+specify any resources and it's up to K8s (or your own) admission hooks to
+handle it.
+
+### HugePages support
+
+The operator supports [HugePages](https://www.postgresql.org/docs/16/kernel-resources.html#LINUX-HUGEPAGES).
+To enable HugePages, set the matching resource requests and/or limits in the manifest:
+
+```yaml
+spec:
+  resources:
+    requests:
+      hugepages-2Mi: 250Mi
+      hugepages-1Gi: 1Gi
+    limits:
+      hugepages-2Mi: 500Mi
+      hugepages-1Gi: 2Gi
+```
+
+There are no minimums or maximums and the default is 0 for both HugePage sizes,
+but Kubernetes will not spin up the pod if the requested HugePages cannot be allocated.
+For more information on HugePages in Kubernetes, see also
+[https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/](https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/)
+
 ## Use taints, tolerations and node affinity for dedicated PostgreSQL nodes
@@ -732,7 +758,7 @@ If you need to define a `nodeAffinity` for all your Postgres clusters use the
 ## In-place major version upgrade
 
 Starting with Spilo 13, the operator supports in-place major version upgrade to a
-higher major version (e.g. from PG 10 to PG 13). To trigger the upgrade,
+higher major version (e.g. from PG 11 to PG 13). To trigger the upgrade,
 simply increase the version in the manifest. It is your responsibility to test
 your applications against the new version before the upgrade; downgrading is
 not supported. The easiest way to do so is to try the upgrade on the cloned
@@ -812,7 +838,7 @@ spec:
 ### Clone directly
 
 Another way to get a fresh copy of your source DB cluster is via
-[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html). To
+[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html). To
 use this feature simply leave out the timestamp field from the clone section.
 The operator will connect to the service of the source cluster by name. If the
 cluster is called test, then the connection string will look like host=test
@@ -938,33 +964,25 @@ established between standby replica(s).
 One big advantage of standby clusters is that they can be promoted to a proper
 database cluster. This means it will stop replicating changes from the source,
 and start accepting writes itself. This mechanism makes it possible to move
-databases from one place to another with minimal downtime. Currently, the
-operator does not support promoting a standby cluster. It has to be done
-manually using `patronictl edit-config` inside the postgres container of the
-standby leader pod. Remove the following lines from the YAML structure and the
-leader promotion happens immediately. Before doing so, make sure that the
-standby is not behind the source database.
+databases from one place to another with minimal downtime.
 
-```yaml
-standby_cluster:
-  create_replica_methods:
-  - bootstrap_standby_with_wale
-  - basebackup_fast_xlog
-  restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh
-    "%f" "%p"
-```
+Before promoting a standby cluster, make sure that the standby is not behind
+the source database. You should ideally stop writes to your source cluster and
+then create a dummy database object that you check for being replicated in the
+target to verify all data has been copied.
 
-Finally, remove the `standby` section from the postgres cluster manifest.
+To promote, remove the `standby` section from the postgres cluster manifest.
+A rolling update will be triggered removing the `STANDBY_*` environment
+variables from the pods, followed by a Patroni config update that promotes the
+cluster.
 
-### Turn a normal cluster into a standby
+### Adding standby section after promotion
 
-There is no way to transform a non-standby cluster to a standby cluster through
-the operator. Adding the `standby` section to the manifest of a running
-Postgres cluster will have no effect. But, as explained in the previous
-paragraph it can be done manually through `patronictl edit-config`. This time,
-by adding the `standby_cluster` section to the Patroni configuration. However,
-the transformed standby cluster will not be doing any streaming. It will be in
-standby mode and allow read-only transactions only.
+Turning a running cluster into a standby is not easily possible and should be
+avoided. The best way is to remove the cluster and resubmit the manifest
+after a short wait of a few minutes. Adding the `standby` section would turn
+the database cluster into read-only mode on the next operator SYNC cycle, but it
+does not sync automatically with the source cluster again.
 
 ## Sidecar Support
@@ -46,7 +46,7 @@ tools:
 	# install pinned version of 'kind'
 	# go install must run outside of a dir with a (module-based) Go project !
 	# otherwise go install updates project's dependencies and/or behaves differently
-	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.14.0
+	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.22.0
 
 e2etest: tools copy clean
 	./run.sh main

@@ -4,3 +4,5 @@ nodes:
 - role: control-plane
 - role: worker
 - role: worker
+featureGates:
+  StatefulSetAutoDeletePVC: true
@@ -1,3 +1,3 @@
-kubernetes==24.2.0
+kubernetes==29.2.0
 timeout_decorator==0.5.0
-pyyaml==6.0
+pyyaml==6.0.1

@@ -8,7 +8,7 @@ IFS=$'\n\t'
 
 readonly cluster_name="postgres-operator-e2e-tests"
 readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
-readonly spilo_image="registry.opensource.zalan.do/acid/spilo-15-e2e:0.1"
+readonly spilo_image="registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
 readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"
 
 export GOPATH=${GOPATH-~/go}
@@ -202,6 +202,9 @@ class K8s:
         return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
                    namespace, label_selector=labels).items)
 
+    def count_pvcs_with_label(self, labels, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_persistent_volume_claim(namespace, label_selector=labels).items)
+
     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: x.status.phase == 'Running', pods)))

@@ -311,7 +314,7 @@ class K8s:
 
     def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
         result = self.get_patroni_state(pod)
-        return list(filter(lambda x: "State" in x and x["State"] == "running", result))
+        return list(filter(lambda x: "State" in x and x["State"] in ["running", "streaming"], result))
 
     def get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"):
         try:

@@ -506,6 +509,9 @@ class K8sBase:
         return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
                    namespace, label_selector=labels).items)
 
+    def count_pvcs_with_label(self, labels, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_persistent_volume_claim(namespace, label_selector=labels).items)
+
     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: x.status.phase == 'Running', pods)))

@@ -577,7 +583,7 @@ class K8sBase:
 
     def get_patroni_running_members(self, pod):
         result = self.get_patroni_state(pod)
-        return list(filter(lambda x: x["State"] == "running", result))
+        return list(filter(lambda x: x["State"] in ["running", "streaming"], result))
 
     def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
         ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1)
@@ -12,8 +12,8 @@ from kubernetes import client
 from tests.k8s_api import K8s
 from kubernetes.client.rest import ApiException
 
-SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.1"
-SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.2"
+SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
+SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
 
 
 def to_selector(labels):

@@ -1200,6 +1200,69 @@ class EndToEndTestCase(unittest.TestCase):
 
         self.eventuallyEqual(check_version_14, "14", "Version was not upgraded to 14")
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_persistent_volume_claim_retention_policy(self):
+        '''
+        Test the retention policy for persistent volume claims
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "PVCs is not equal to number of instances")
+
+        # patch the pvc retention policy to enable delete when scaling down
+        patch_scaled_policy_delete = {
+            "data": {
+                "persistent_volume_claim_retention_policy": "when_deleted:retain,when_scaled:delete"
+            }
+        }
+        k8s.update_config(patch_scaled_policy_delete)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        pg_patch_scale_down_instances = {
+            'spec': {
+                'numberOfInstances': 1
+            }
+        }
+        # decrease the number of instances
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 1, "PVCs is not deleted when scaled down")
+
+        pg_patch_scale_up_instances = {
+            'spec': {
+                'numberOfInstances': 2
+            }
+        }
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_up_instances)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "PVCs is not equal to number of instances")
+
+        # reset retention policy to retain
+        patch_scaled_policy_retain = {
+            "data": {
+                "persistent_volume_claim_retention_policy": "when_deleted:retain,when_scaled:retain"
+            }
+        }
+        k8s.update_config(patch_scaled_policy_retain)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        # decrease the number of instances
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 1, "Scale down to 1 failed")
+        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "PVCs is deleted when scaled down")
+
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_up_instances)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "PVCs is not equal to number of instances")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_resource_generation(self):
         '''

@@ -1297,6 +1360,7 @@ class EndToEndTestCase(unittest.TestCase):
         time.sleep(5)
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    @unittest.skip("Skipping this test until fixed")
     def test_node_affinity(self):
         '''
         Add label to a node and update postgres cluster spec to deploy only on a node with that label

@@ -1514,15 +1578,18 @@ class EndToEndTestCase(unittest.TestCase):
         today = date.today()
 
         # enable password rotation for owner of foo database
-        pg_patch_inplace_rotation_for_owner = {
+        pg_patch_rotation_single_users = {
             "spec": {
+                "usersIgnoringSecretRotation": [
+                    "test.db_user"
+                ],
                 "usersWithInPlaceSecretRotation": [
                     "zalando"
                 ]
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_inplace_rotation_for_owner)
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_rotation_single_users)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         # check if next rotation date was set in secret

@@ -1611,6 +1678,13 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1,
             "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5)
 
+        # check if rotation has been ignored for user from test_cross_namespace_secrets test
+        db_user_secret = k8s.get_secret(username="test.db_user", namespace="test")
+        secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8')
+
+        self.assertEqual("test.db_user", secret_username,
+                         "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))
+
         # disable password rotation for all other users (foo_user)
         # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {

@@ -1974,7 +2048,8 @@ class EndToEndTestCase(unittest.TestCase):
         patch_delete_annotations = {
             "data": {
                 "delete_annotation_date_key": "delete-date",
-                "delete_annotation_name_key": "delete-clustername"
+                "delete_annotation_name_key": "delete-clustername",
+                "enable_persistent_volume_claim_deletion": "false"
             }
         }
         k8s.update_config(patch_delete_annotations)

@@ -2035,6 +2110,7 @@ class EndToEndTestCase(unittest.TestCase):
             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted")
             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted")
             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets not deleted")
+            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
67 go.mod

@@ -1,71 +1,70 @@
 module github.com/zalando/postgres-operator
 
-go 1.19
+go 1.21
 
 require (
 	github.com/aws/aws-sdk-go v1.42.18
 	github.com/golang/mock v1.6.0
-	github.com/lib/pq v1.10.4
+	github.com/lib/pq v1.10.9
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
 	github.com/pkg/errors v0.9.1
 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.9.0
-	github.com/stretchr/testify v1.8.0
-	golang.org/x/crypto v0.8.0
-	golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a
+	github.com/stretchr/testify v1.8.2
+	golang.org/x/crypto v0.18.0
+	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.25.9
+	k8s.io/api v0.28.7
 	k8s.io/apiextensions-apiserver v0.25.9
-	k8s.io/apimachinery v0.25.9
-	k8s.io/client-go v0.25.9
+	k8s.io/apimachinery v0.28.7
+	k8s.io/client-go v0.28.7
 	k8s.io/code-generator v0.25.9
 )
 
 require (
-	github.com/PuerkitoBio/purell v1.1.1 // indirect
-	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-cmp v0.5.8 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/imdario/mergo v0.3.6 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/text v0.2.0 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/mod v0.8.0 // indirect
-	golang.org/x/net v0.9.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
-	golang.org/x/sys v0.7.0 // indirect
-	golang.org/x/term v0.7.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
-	golang.org/x/tools v0.6.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/net v0.20.0 // indirect
+	golang.org/x/oauth2 v0.8.0 // indirect
+	golang.org/x/sys v0.16.0 // indirect
+	golang.org/x/term v0.16.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/tools v0.17.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
-	k8s.io/klog/v2 v2.70.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
-	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	k8s.io/klog/v2 v2.100.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
+	k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
-	sigs.k8s.io/yaml v1.2.0 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
 )
|
||||||
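The require block above (old and new lines shown together) is the go.mod state around this dependency bump; the // indirect comments mark modules that are only pulled in transitively, and the go.sum hunks below update the matching checksums. As a hedged illustration only (not part of this commit; the golang.org/x/mod/modfile dependency and the hard-coded "go.mod" path are assumptions for the sketch), a minimal Go program that lists exactly these indirect requirements could look like this:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	// Read and parse a go.mod file; the path is an assumed example.
	data, err := os.ReadFile("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Print only the requirements tagged "// indirect", i.e. entries like the block above.
	for _, r := range f.Require {
		if r.Indirect {
			fmt.Printf("%s %s\n", r.Mod.Path, r.Mod.Version)
		}
	}
}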
go.sum: 491 changed lines
@@ -1,148 +1,52 @@
[Condensed go.sum hunk: checksum entries are removed for modules that dropped out of the build graph, among them cloud.google.com/go and its bigquery/datastore/pubsub/storage submodules, dmitri.shuralyov.com/gpu/mtl, github.com/BurntSushi/toml and xgb, github.com/PuerkitoBio/purell and urlesc, github.com/census-instrumentation/opencensus-proto, github.com/chzyer/logex, readline and test, github.com/client9/misspell, github.com/cncf/udpa/go, github.com/docopt/docopt-go, github.com/elazarl/goproxy, github.com/envoyproxy/go-control-plane and protoc-gen-validate, github.com/go-gl/glfw, github.com/golang/glog, github.com/google/btree, github.com/google/martian, github.com/google/renameio, github.com/googleapis/gax-go/v2, github.com/hashicorp/golang-lru, github.com/ianlancetaylor/demangle, and older releases of github.com/golang/mock, github.com/golang/protobuf, github.com/golang/groupcache and github.com/google/pprof.
Checksums are bumped in lockstep with go.mod for github.com/emicklei/go-restful/v3 v3.8.0 -> v3.9.0, github.com/go-logr/logr v1.2.3 -> v1.2.4, github.com/go-openapi/jsonpointer v0.19.5 -> v0.19.6, github.com/go-openapi/jsonreference v0.19.5 -> v0.20.2, github.com/go-openapi/swag v0.19.14 -> v0.22.3, github.com/golang/protobuf v1.5.2 -> v1.5.3, github.com/google/go-cmp v0.5.8 -> v0.5.9, github.com/google/gofuzz v1.1.0 -> v1.2.0 and github.com/google/uuid v1.1.2 -> v1.3.0; github.com/google/gnostic v0.5.7-v3refs is replaced by github.com/google/gnostic-models v0.6.8, and new entries appear for github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 and github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1.
Context entries such as github.com/armon/go-socks5, github.com/aws/aws-sdk-go v1.42.18, github.com/davecgh/go-spew v1.1.1, github.com/evanphx/json-patch v4.12.0+incompatible, github.com/gogo/protobuf v1.3.2 and github.com/golang/mock v1.6.0 are unchanged.]
@@ -153,23 +57,20 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
[Condensed go.sum hunk: go.mod-only entries for github.com/jstemmer/go-junit-report and older github.com/kr/pretty and github.com/mailru/easyjson releases are removed; github.com/kr/pretty gains v0.2.1 and v0.3.1 entries; github.com/lib/pq is bumped v1.10.4 -> v1.10.9 and github.com/mailru/easyjson v0.7.6 -> v0.7.7.
Context entries for github.com/json-iterator/go v1.1.12, github.com/kisielk/errcheck, github.com/kisielk/gotool, github.com/kr/pty, github.com/kr/text v0.2.0, github.com/moby/spdystream v0.2.0 and github.com/modern-go/concurrent are unchanged.]
@@ -181,321 +82,108 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
[Condensed go.sum hunk (the listing is cut off before the end of the hunk): entries are removed for github.com/niemeyer/pretty, github.com/prometheus/client_model, github.com/spf13/afero, github.com/stoewer/go-strcase, older github.com/yuin/goldmark releases, go.opencensus.io, golang.org/x/image, golang.org/x/lint, golang.org/x/mobile, and many dated pre-release go.mod-only entries of golang.org/x/crypto, x/exp, x/mod, x/net, x/oauth2, x/sync, x/sys, x/time, x/tools and google.golang.org/api.
Checksums are bumped in lockstep with go.mod for github.com/onsi/ginkgo/v2 v2.1.6 -> v2.9.4, github.com/onsi/gomega v1.20.1 -> v1.27.6, github.com/rogpeppe/go-internal v1.3.0 -> v1.10.0, github.com/stretchr/testify v1.8.0 -> v1.8.2 (with new v1.8.1 and github.com/stretchr/objx v0.5.0 go.mod entries), golang.org/x/crypto v0.8.0 -> v0.18.0, golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a -> v0.0.0-20240112132812-db7319d0e0e3, golang.org/x/mod v0.8.0 -> v0.14.0, golang.org/x/net v0.9.0 -> v0.20.0, golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 -> v0.8.0, golang.org/x/sync v0.1.0 -> v0.6.0, golang.org/x/sys v0.7.0 -> v0.16.0, golang.org/x/term v0.7.0 -> v0.16.0, golang.org/x/text v0.9.0 -> v0.14.0, golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 -> v0.3.0 and golang.org/x/tools v0.6.0 -> v0.17.0.
Context entries such as github.com/pkg/errors v0.9.1, github.com/pmezard/go-difflib v1.0.0, github.com/r3labs/diff v1.1.0, github.com/sirupsen/logrus v1.9.0, github.com/spf13/pflag v1.0.5 and golang.org/x/xerrors are unchanged.]
|
|
||||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
|
||||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
|
||||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
|
||||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
|
||||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
|
||||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
|
||||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
|
||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
|
||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
|
||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
|
@@ -503,42 +191,31 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
k8s.io/api v0.28.7 h1:YKIhBxjXKaxuxWJnwohV0aGjRA5l4IU0Eywf/q19AVI=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY=
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
k8s.io/api v0.25.9 h1:XuJ2bz2F52jZmp3YjUcp/pozH8kY1BlBHdXnoOXBP3U=
|
|
||||||
k8s.io/api v0.25.9/go.mod h1:9YRWzD0cRHzfsnf9e5OQsQ4Un6cbZ//Xv3jo44YKm2Y=
|
|
||||||
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
|
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
|
||||||
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
|
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
|
||||||
k8s.io/apimachinery v0.25.9 h1:MPjgTz4dbAKJ/KiHIvDeYkFfIn7ueihqvT520HkV7v4=
|
k8s.io/apimachinery v0.28.7 h1:2Z38/XRAOcpb+PonxmBEmjG7hBfmmr41xnr0XvpTnB4=
|
||||||
k8s.io/apimachinery v0.25.9/go.mod h1:ZTl0drTQaFi5gMM3snYI5tWV1XJmRH1gfnDx2QCLsxk=
|
k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA=
|
||||||
k8s.io/client-go v0.25.9 h1:U0S3nc71NRfHXiA0utyCkPt3Mv1SWpQw0g5VfBCv5xg=
|
k8s.io/client-go v0.28.7 h1:3L6402+tjmOl8twX3fjUQ/wsYAkw6UlVNDVP+rF6YGA=
|
||||||
k8s.io/client-go v0.25.9/go.mod h1:tmPyOtpbbkneXj65EYZ4sXun1BE/2F2XlRABVj9CBgc=
|
k8s.io/client-go v0.28.7/go.mod h1:xIoEaDewZ+EwWOo1/F1t0IOKMPe1rwBZhLu9Es6y0tE=
|
||||||
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
|
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
|
||||||
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
|
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
|
||||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI=
|
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI=
|
||||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
|
||||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
|
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
|
||||||
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
|
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
|
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
|
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||||
|
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||||
|
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||||
|
|
|
||||||
|
|
@@ -15,7 +15,7 @@ cleanup() {
}
trap "cleanup" EXIT SIGINT

bash "${CODEGEN_PKG}/generate-groups.sh" all \
bash "${CODEGEN_PKG}/generate-groups.sh" client,deepcopy,informer,lister \
  "${OPERATOR_PACKAGE_ROOT}/pkg/generated" "${OPERATOR_PACKAGE_ROOT}/pkg/apis" \
  "acid.zalan.do:v1 zalando.org:v1" \
  --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt \

@@ -25,8 +25,8 @@ package cmd
import (
    "context"
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/spf13/cobra"
    v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -56,7 +56,7 @@ func create(fileName string) {
    if err != nil {
        log.Fatal(err)
    }
    ymlFile, err := ioutil.ReadFile(fileName)
    ymlFile, err := os.ReadFile(fileName)
    if err != nil {
        log.Fatal(err)
    }

@@ -25,8 +25,8 @@ package cmd
import (
    "context"
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/spf13/cobra"
    v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -77,7 +77,7 @@ func deleteByFile(file string) {
        log.Fatal(err)
    }

    ymlFile, err := ioutil.ReadFile(file)
    ymlFile, err := os.ReadFile(file)
    if err != nil {
        log.Fatal(err)
    }

@@ -25,8 +25,8 @@ package cmd
import (
    "context"
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/spf13/cobra"
    v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -60,7 +60,7 @@ func updatePgResources(fileName string) {
    if err != nil {
        log.Fatal(err)
    }
    ymlFile, err := ioutil.ReadFile(fileName)
    ymlFile, err := os.ReadFile(fileName)
    if err != nil {
        log.Fatal(err)
    }

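The three kubectl-pg hunks above apply the same mechanical migration: the deprecated io/ioutil import is dropped and ioutil.ReadFile is replaced by os.ReadFile, which has had the identical signature since Go 1.16. A minimal, hypothetical sketch of the resulting read path; readManifest and the sigs.k8s.io/yaml decoder are illustrative assumptions, not code taken from the repository:

package main

import (
    "log"
    "os"

    "sigs.k8s.io/yaml" // assumption: any YAML unmarshaler would do here
)

// readManifest loads a manifest from disk with os.ReadFile, the drop-in
// replacement for the deprecated ioutil.ReadFile.
func readManifest(fileName string) (map[string]interface{}, error) {
    ymlFile, err := os.ReadFile(fileName)
    if err != nil {
        return nil, err
    }
    var manifest map[string]interface{}
    if err := yaml.Unmarshal(ymlFile, &manifest); err != nil {
        return nil, err
    }
    return manifest, nil
}

func main() {
    manifest, err := readManifest("postgres-manifest.yaml")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("loaded manifest with %d top-level keys", len(manifest))
}
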
@@ -1,39 +1,39 @@
module github.com/zalando/postgres-operator/kubectl-pg

go 1.18
go 1.21

require (
    github.com/spf13/cobra v1.4.0
    github.com/spf13/cobra v1.7.0
    github.com/spf13/viper v1.9.0
    github.com/zalando/postgres-operator v1.10.0
    github.com/zalando/postgres-operator v1.10.1
    k8s.io/api v0.25.9
    k8s.io/api v0.28.7
    k8s.io/apiextensions-apiserver v0.25.9
    k8s.io/apiextensions-apiserver v0.28.7
    k8s.io/apimachinery v0.25.9
    k8s.io/apimachinery v0.28.7
    k8s.io/client-go v0.25.9
    k8s.io/client-go v0.28.7
)

require (
    github.com/PuerkitoBio/purell v1.1.1 // indirect
    github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/emicklei/go-restful/v3 v3.8.0 // indirect
    github.com/emicklei/go-restful/v3 v3.9.0 // indirect
    github.com/fsnotify/fsnotify v1.5.1 // indirect
    github.com/fsnotify/fsnotify v1.6.0 // indirect
    github.com/go-logr/logr v1.2.3 // indirect
    github.com/go-logr/logr v1.2.4 // indirect
    github.com/go-openapi/jsonpointer v0.19.5 // indirect
    github.com/go-openapi/jsonpointer v0.19.6 // indirect
    github.com/go-openapi/jsonreference v0.19.5 // indirect
    github.com/go-openapi/jsonreference v0.20.2 // indirect
    github.com/go-openapi/swag v0.19.14 // indirect
    github.com/go-openapi/swag v0.22.3 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/golang/protobuf v1.5.3 // indirect
    github.com/google/gnostic v0.5.7-v3refs // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/google/gofuzz v1.1.0 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/imdario/mergo v0.3.6 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/kr/text v0.2.0 // indirect
    github.com/magiconair/properties v1.8.5 // indirect
    github.com/mailru/easyjson v0.7.6 // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/mitchellh/mapstructure v1.4.2 // indirect
    github.com/moby/spdystream v0.2.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -47,23 +47,23 @@ require (
    github.com/spf13/jwalterweatherman v1.1.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/subosito/gotenv v1.2.0 // indirect
    golang.org/x/crypto v0.8.0 // indirect
    golang.org/x/crypto v0.17.0 // indirect
    golang.org/x/net v0.9.0 // indirect
    golang.org/x/net v0.19.0 // indirect
    golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
    golang.org/x/oauth2 v0.8.0 // indirect
    golang.org/x/sys v0.7.0 // indirect
    golang.org/x/sys v0.15.0 // indirect
    golang.org/x/term v0.7.0 // indirect
    golang.org/x/term v0.15.0 // indirect
    golang.org/x/text v0.9.0 // indirect
    golang.org/x/text v0.14.0 // indirect
    golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
    golang.org/x/time v0.3.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/protobuf v1.28.0 // indirect
    google.golang.org/protobuf v1.33.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/ini.v1 v1.63.2 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/klog/v2 v2.70.1 // indirect
    k8s.io/klog/v2 v2.100.1 // indirect
    k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
    k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
    k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
    k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
    sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
    sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
    sigs.k8s.io/yaml v1.2.0 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect
)

@@ -45,16 +45,13 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
|
||||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||||
|
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||||
|
|
@@ -68,15 +65,13 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
|
||||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
|
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||||
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
|
|
||||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
|
@@ -87,24 +82,24 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
|
||||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||||
|
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||||
|
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
|
||||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||||
github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
|
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||||
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
|
@@ -137,13 +132,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||||
|
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||||
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
|
@@ -156,10 +152,11 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
|
@@ -177,9 +174,12 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
|
||||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||||
|
|
@@ -211,8 +211,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
|
@@ -224,18 +224,18 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
|
||||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
|
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
|
||||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
|
||||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
|
||||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
|
|
@@ -266,10 +266,10 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
|
||||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
|
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
|
||||||
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
|
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||||
|
|
@@ -282,6 +282,8 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
|
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
|
||||||
|
|
@ -289,28 +291,32 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
|
||||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
|
||||||
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
||||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||||
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
|
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
|
||||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
|
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
|
github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
|
||||||
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
|
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
|
||||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
|
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||||
|
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
|
@ -318,8 +324,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/zalando/postgres-operator v1.10.0 h1:7/Xp9v6knCbZc4SXc3t6/5uyiHpqumc2SYPudXazJZw=
|
github.com/zalando/postgres-operator v1.10.1 h1:2QAZam6e3dhK8D64Hc9m4eul29f1yggGMAH3ff20etw=
|
||||||
github.com/zalando/postgres-operator v1.10.0/go.mod h1:UYVdslgiYgsKSuU24Mne2qO67nuWTJwWiT1WQDurROs=
|
github.com/zalando/postgres-operator v1.10.1/go.mod h1:UYVdslgiYgsKSuU24Mne2qO67nuWTJwWiT1WQDurROs=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||||
|
|
@ -343,8 +349,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
|
@ -393,7 +399,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
|
@ -418,8 +423,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
|
@ -435,8 +440,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
|
||||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
|
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
|
||||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
|
@ -504,11 +509,12 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||||
|
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
|
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
|
@ -517,13 +523,13 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
|
@ -578,6 +584,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
|
||||||
|
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
|
@ -648,7 +656,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
|
||||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
|
@ -709,13 +716,13 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
|
|
@ -727,7 +734,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|
@ -738,27 +744,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
k8s.io/api v0.25.9 h1:XuJ2bz2F52jZmp3YjUcp/pozH8kY1BlBHdXnoOXBP3U=
|
k8s.io/api v0.28.7 h1:YKIhBxjXKaxuxWJnwohV0aGjRA5l4IU0Eywf/q19AVI=
|
||||||
k8s.io/api v0.25.9/go.mod h1:9YRWzD0cRHzfsnf9e5OQsQ4Un6cbZ//Xv3jo44YKm2Y=
|
k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY=
|
||||||
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
|
k8s.io/apiextensions-apiserver v0.28.7 h1:NQlzP/vmvIO9Qt7wQTdMe9sGWGkozQZMPk9suehAvR8=
|
||||||
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
|
k8s.io/apiextensions-apiserver v0.28.7/go.mod h1:ST+ZOppyy+Z0mIxezSOK8qwIXctNwdFLNpGkQp8bw4M=
|
||||||
k8s.io/apimachinery v0.25.9 h1:MPjgTz4dbAKJ/KiHIvDeYkFfIn7ueihqvT520HkV7v4=
|
k8s.io/apimachinery v0.28.7 h1:2Z38/XRAOcpb+PonxmBEmjG7hBfmmr41xnr0XvpTnB4=
|
||||||
k8s.io/apimachinery v0.25.9/go.mod h1:ZTl0drTQaFi5gMM3snYI5tWV1XJmRH1gfnDx2QCLsxk=
|
k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA=
|
||||||
k8s.io/client-go v0.25.9 h1:U0S3nc71NRfHXiA0utyCkPt3Mv1SWpQw0g5VfBCv5xg=
|
k8s.io/client-go v0.28.7 h1:3L6402+tjmOl8twX3fjUQ/wsYAkw6UlVNDVP+rF6YGA=
|
||||||
k8s.io/client-go v0.25.9/go.mod h1:tmPyOtpbbkneXj65EYZ4sXun1BE/2F2XlRABVj9CBgc=
|
k8s.io/client-go v0.28.7/go.mod h1:xIoEaDewZ+EwWOo1/F1t0IOKMPe1rwBZhLu9Es6y0tE=
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
|
||||||
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
|
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
|
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
|
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
|
||||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
|
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||||
|
|
|
||||||
|
|
@@ -10,7 +10,7 @@ metadata:
 # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
 # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
 spec:
-dockerImage: ghcr.io/zalando/spilo-15:3.0-p1
+dockerImage: ghcr.io/zalando/spilo-16:3.2-p2
 teamId: "acid"
 numberOfInstances: 2
 users: # Application/Robot users
@@ -19,6 +19,8 @@ spec:
 - createdb
 foo_user: []
 # flyway: []
+# usersIgnoringSecretRotation:
+# - bar_user
 # usersWithSecretRotation:
 # - foo_user
 # usersWithInPlaceSecretRotation:
@@ -46,7 +48,7 @@ spec:
 defaultRoles: true
 defaultUsers: false
 postgresql:
-version: "15"
+version: "16"
 parameters: # Expert section
 shared_buffers: "32MB"
 max_connections: "10"
@@ -107,9 +109,13 @@ spec:
 requests:
 cpu: 10m
 memory: 100Mi
+# hugepages-2Mi: 128Mi
+# hugepages-1Gi: 1Gi
 limits:
 cpu: 500m
 memory: 500Mi
+# hugepages-2Mi: 128Mi
+# hugepages-1Gi: 1Gi
 patroni:
 failsafe_mode: false
 initdb:

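The hugepages entries added to the example manifest above ship commented out. As an illustration only, a minimal sketch of a cluster manifest with them enabled could look roughly like the following; the name acid-minimal-cluster is a placeholder, and the nodes are assumed to have 2Mi huge pages pre-allocated, otherwise the pod will not schedule.

# Sketch only: placeholder cluster name, huge pages must already exist on the node.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 1
  volume:
    size: 1Gi
  postgresql:
    version: "16"
  resources:
    requests:
      cpu: 10m
      memory: 100Mi
      hugepages-2Mi: 128Mi
    limits:
      cpu: 500m
      memory: 500Mi
      hugepages-2Mi: 128Mi
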
@@ -13,11 +13,11 @@ data:
 cluster_history_entries: "1000"
 cluster_labels: application:spilo
 cluster_name_label: cluster-name
-# connection_pooler_default_cpu_limit: "1"
+connection_pooler_default_cpu_limit: "1"
-# connection_pooler_default_cpu_request: "500m"
+connection_pooler_default_cpu_request: "500m"
-# connection_pooler_default_memory_limit: 100Mi
+connection_pooler_default_memory_limit: 100Mi
-# connection_pooler_default_memory_request: 100Mi
+connection_pooler_default_memory_request: 100Mi
-connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
+connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
 # connection_pooler_max_db_connections: 60
 # connection_pooler_mode: "transaction"
 # connection_pooler_number_of_instances: 2
@@ -28,17 +28,18 @@ data:
 # custom_pod_annotations: "keya:valuea,keyb:valueb"
 db_hosted_zone: db.example.com
 debug_logging: "true"
-# default_cpu_limit: "1"
+default_cpu_limit: "1"
-# default_cpu_request: 100m
+default_cpu_request: 100m
-# default_memory_limit: 500Mi
+default_memory_limit: 500Mi
-# default_memory_request: 100Mi
+default_memory_request: 100Mi
 # delete_annotation_date_key: delete-date
 # delete_annotation_name_key: delete-clustername
-docker_image: ghcr.io/zalando/spilo-15:3.0-p1
+docker_image: ghcr.io/zalando/spilo-16:3.2-p2
 # downscaler_annotations: "deployment-time,downscaler/*"
 # enable_admin_role_for_users: "true"
 # enable_crd_registration: "true"
 # enable_cross_namespace_secret: "false"
+enable_finalizers: "false"
 # enable_database_access: "true"
 enable_ebs_gp3_migration: "false"
 # enable_ebs_gp3_migration_max_size: "1000"
@@ -48,6 +49,7 @@ data:
 enable_master_pooler_load_balancer: "false"
 enable_password_rotation: "false"
 enable_patroni_failsafe_mode: "false"
+enable_persistent_volume_claim_deletion: "true"
 enable_pgversion_env_var: "true"
 # enable_pod_antiaffinity: "false"
 # enable_pod_disruption_budget: "true"
@@ -80,7 +82,7 @@ data:
 # logical_backup_azure_storage_account_key: ""
 # logical_backup_cpu_limit: ""
 # logical_backup_cpu_request: ""
-logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
 # logical_backup_google_application_credentials: ""
 logical_backup_job_prefix: "logical-backup-"
 # logical_backup_memory_limit: ""
@@ -94,6 +96,7 @@ data:
 logical_backup_s3_sse: "AES256"
 # logical_backup_s3_retention_time: ""
 logical_backup_schedule: "30 00 * * *"
+# logical_backup_cronjob_environment_secret: ""
 major_version_upgrade_mode: "manual"
 # major_version_upgrade_team_allow_list: ""
 master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
@@ -105,7 +108,7 @@ data:
 # max_memory_request: 4Gi
 # min_cpu_limit: 250m
 # min_memory_limit: 250Mi
-# minimal_major_version: "11"
+# minimal_major_version: "12"
 # node_readiness_label: "status:ready"
 # node_readiness_label_merge: "OR"
 # oauth_token_secret_name: postgresql-operator
@@ -117,6 +120,7 @@ data:
 # password_rotation_interval: "90"
 # password_rotation_user_retention: "180"
 pdb_name_format: "postgres-{cluster}-pdb"
+persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:retain"
 # pod_antiaffinity_preferred_during_scheduling: "false"
 # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
 pod_deletion_wait_timeout: 10m
@@ -154,7 +158,7 @@ data:
 spilo_privileged: "false"
 storage_resize_mode: "pvc"
 super_username: postgres
-# target_major_version: "15"
+# target_major_version: "16"
 # team_admin_role: "admin"
 # team_api_role_configuration: "log_statement:all"
 # teams_api_url: http://fake-teams-api.default.svc.cluster.local

@@ -23,7 +23,7 @@ spec:
 serviceAccountName: postgres-operator
 containers:
 - name: postgres-operator
-image: registry.opensource.zalan.do/acid/pgbouncer:master-27
+image: registry.opensource.zalan.do/acid/pgbouncer:master-32
 imagePullPolicy: IfNotPresent
 resources:
 requests:

@@ -17,4 +17,4 @@ spec:
 preparedDatabases:
 bar: {}
 postgresql:
-version: "15"
+version: "16"

@@ -66,7 +66,7 @@ spec:
 type: string
 docker_image:
 type: string
-default: "ghcr.io/zalando/spilo-15:3.0-p1"
+default: "ghcr.io/zalando/spilo-16:3.2-p2"
 enable_crd_registration:
 type: boolean
 default: true
@@ -165,10 +165,10 @@ spec:
 type: string
 minimal_major_version:
 type: string
-default: "11"
+default: "12"
 target_major_version:
 type: string
-default: "15"
+default: "16"
 kubernetes:
 type: object
 properties:
@@ -203,9 +203,15 @@ spec:
 enable_cross_namespace_secret:
 type: boolean
 default: false
+enable_finalizers:
+type: boolean
+default: false
 enable_init_containers:
 type: boolean
 default: true
+enable_persistent_volume_claim_deletion:
+type: boolean
+default: true
 enable_pod_antiaffinity:
 type: boolean
 default: false
@@ -276,6 +282,19 @@ spec:
 pdb_name_format:
 type: string
 default: "postgres-{cluster}-pdb"
+persistent_volume_claim_retention_policy:
+type: object
+properties:
+when_deleted:
+type: string
+enum:
+- "delete"
+- "retain"
+when_scaled:
+type: string
+enum:
+- "delete"
+- "retain"
 pod_antiaffinity_preferred_during_scheduling:
 type: boolean
 default: false
@@ -347,19 +366,15 @@ spec:
 default_cpu_limit:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-default: "1"
 default_cpu_request:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-default: "100m"
 default_memory_limit:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-default: "500Mi"
 default_memory_request:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-default: "100Mi"
 max_cpu_request:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
@@ -369,11 +384,9 @@ spec:
 min_cpu_limit:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-default: "250m"
 min_memory_limit:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-default: "250Mi"
 timeouts:
 type: object
 properties:
@@ -487,7 +500,7 @@ spec:
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
 logical_backup_docker_image:
 type: string
-default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+default: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
 logical_backup_google_application_credentials:
 type: string
 logical_backup_job_prefix:
@@ -524,6 +537,8 @@ spec:
 type: string
 pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
 default: "30 00 * * *"
+logical_backup_cronjob_environment_secret:
+type: string
 debug:
 type: object
 properties:
@@ -635,7 +650,7 @@ spec:
 default: "pooler"
 connection_pooler_image:
 type: string
-default: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
+default: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
 connection_pooler_max_db_connections:
 type: integer
 default: 60
@@ -652,19 +667,15 @@ spec:
 connection_pooler_default_cpu_limit:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-default: "1"
 connection_pooler_default_cpu_request:
 type: string
 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
-default: "500m"
 connection_pooler_default_memory_limit:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-default: "100Mi"
 connection_pooler_default_memory_request:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
-default: "100Mi"
 patroni:
 type: object
 properties:

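The new persistent_volume_claim_retention_policy object above accepts only "delete" or "retain" for when_deleted and when_scaled, and enable_persistent_volume_claim_deletion gates whether the operator acts on it at all. As a rough sketch only, and assuming both fields sit under the kubernetes section of an OperatorConfiguration as in the default configuration shipped with this release, the non-default variant might look like this; it is not a recommended setting, since it drops PVCs on cluster deletion and scale-down.

# Sketch only: use "delete" only where the data on the PVCs is expendable.
configuration:
  kubernetes:
    enable_persistent_volume_claim_deletion: true
    persistent_volume_claim_retention_policy:
      when_deleted: "delete"
      when_scaled: "delete"
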
@@ -19,7 +19,7 @@ spec:
 serviceAccountName: postgres-operator
 containers:
 - name: postgres-operator
-image: registry.opensource.zalan.do/acid/postgres-operator:v1.10.1
+image: registry.opensource.zalan.do/acid/postgres-operator:v1.11.0
 imagePullPolicy: IfNotPresent
 resources:
 requests:

@@ -3,7 +3,7 @@ kind: OperatorConfiguration
 metadata:
 name: postgresql-operator-default-configuration
 configuration:
-docker_image: ghcr.io/zalando/spilo-15:3.0-p1
+docker_image: ghcr.io/zalando/spilo-16:3.2-p2
 # enable_crd_registration: true
 # crd_categories:
 # - all
@@ -39,8 +39,8 @@ configuration:
 major_version_upgrade_mode: "off"
 # major_version_upgrade_team_allow_list:
 # - acid
-minimal_major_version: "11"
+minimal_major_version: "12"
-target_major_version: "15"
+target_major_version: "16"
 kubernetes:
 # additional_pod_capabilities:
 # - "SYS_NICE"
@@ -57,7 +57,9 @@ configuration:
 # - deployment-time
 # - downscaler/*
 # enable_cross_namespace_secret: "false"
+enable_finalizers: false
 enable_init_containers: true
+enable_persistent_volume_claim_deletion: true
 enable_pod_antiaffinity: false
 enable_pod_disruption_budget: true
 enable_readiness_probe: false
@@ -84,6 +86,9 @@ configuration:
 # node_readiness_label_merge: "OR"
 oauth_token_secret_name: postgresql-operator
 pdb_name_format: "postgres-{cluster}-pdb"
+persistent_volume_claim_retention_policy:
+when_deleted: "retain"
+when_scaled: "retain"
 pod_antiaffinity_preferred_during_scheduling: false
 pod_antiaffinity_topology_key: "kubernetes.io/hostname"
 # pod_environment_configmap: "default/my-custom-config"
@@ -160,7 +165,7 @@ configuration:
 # logical_backup_cpu_request: ""
 # logical_backup_memory_limit: ""
 # logical_backup_memory_request: ""
-logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1"
+logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.11.0"
 # logical_backup_google_application_credentials: ""
 logical_backup_job_prefix: "logical-backup-"
 logical_backup_provider: "s3"
@@ -172,6 +177,7 @@ configuration:
 logical_backup_s3_sse: "AES256"
 # logical_backup_s3_retention_time: ""
 logical_backup_schedule: "30 00 * * *"
+# logical_backup_cronjob_environment_secret: ""
 debug:
 debug_logging: true
 enable_database_access: true
@@ -203,7 +209,7 @@ configuration:
 connection_pooler_default_cpu_request: "500m"
 connection_pooler_default_memory_limit: 100Mi
 connection_pooler_default_memory_request: 100Mi
-connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
+connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
 # connection_pooler_max_db_connections: 60
 connection_pooler_mode: "transaction"
 connection_pooler_number_of_instances: 2

@@ -369,12 +369,12 @@ spec:
 version:
 type: string
 enum:
-- "10"
 - "11"
 - "12"
 - "13"
 - "14"
 - "15"
+- "16"
 parameters:
 type: object
 additionalProperties:
@@ -439,6 +439,12 @@ spec:
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
 # Note: the value specified here must not be zero or be higher
 # than the corresponding limit.
+hugepages-2Mi:
+type: string
+pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+hugepages-1Gi:
+type: string
+pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
 requests:
 type: object
 properties:
@@ -448,6 +454,12 @@ spec:
 memory:
 type: string
 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+hugepages-2Mi:
+type: string
+pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+hugepages-1Gi:
+type: string
+pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
 schedulerName:
 type: string
 serviceAnnotations:
@@ -499,6 +511,8 @@ spec:
 type: integer
 database:
 type: string
+enableRecovery:
+type: boolean
 filter:
 type: object
 additionalProperties:
@@ -516,6 +530,8 @@ spec:
 type: string
 payloadColumn:
 type: string
+recoveryEventType:
+type: string
 teamId:
 type: string
 tls:
@@ -594,6 +610,11 @@ spec:
 - SUPERUSER
 - nosuperuser
 - NOSUPERUSER
+usersIgnoringSecretRotation:
+type: array
+nullable: true
+items:
+type: string
 usersWithInPlaceSecretRotation:
 type: array
 nullable: true

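To illustrate the new usersIgnoringSecretRotation array added to the cluster CRD above, here is a hedged sketch of a spec fragment that rotates the secret of one robot user while pinning another; foo_user and bar_user are just the example role names used elsewhere in these manifests, and operator-wide rotation is assumed to be switched on via enable_password_rotation.

# Sketch only: placeholder role names, rotation itself is configured on the operator.
spec:
  users:
    foo_user: []
    bar_user: []
  usersWithSecretRotation:
  - foo_user
  usersIgnoringSecretRotation:
  - bar_user
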
@@ -8,7 +8,7 @@ spec:
 size: 1Gi
 numberOfInstances: 1
 postgresql:
-version: "15"
+version: "16"
 # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
 standby:
 # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"

@@ -1,6 +1,6 @@
 package v1

 // ClusterStatusUnknown etc : status of a Postgres cluster known to the operator
 const (
 ClusterStatusUnknown = ""
 ClusterStatusCreating = "Creating"

@@ -3,10 +3,11 @@ package v1
 import (
 "fmt"

-acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
-"github.com/zalando/postgres-operator/pkg/util"
 apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
+"github.com/zalando/postgres-operator/pkg/util"
 )

 // CRDResource* define names necesssary for the k8s CRD API
@@ -588,9 +589,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 "version": {
 Type: "string",
 Enum: []apiextv1.JSON{
-{
-Raw: []byte(`"10"`),
-},
 {
 Raw: []byte(`"11"`),
 },
@@ -606,6 +604,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 {
 Raw: []byte(`"15"`),
 },
+{
+Raw: []byte(`"16"`),
+},
 },
 },
 "parameters": {
@@ -684,6 +685,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 Type: "string",
 Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
 },
+"hugepages-2Mi": {
+Type: "string",
+Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
+},
+"hugepages-1Gi": {
+Type: "string",
+Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
+},
 },
 },
 "requests": {
@@ -697,6 +706,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 Type: "string",
 Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
 },
+"hugepages-2Mi": {
+Type: "string",
+Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
+},
+"hugepages-1Gi": {
+Type: "string",
+Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
+},
 },
 },
 },
@@ -769,6 +786,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 "database": {
 Type: "string",
 },
+"enableRecovery": {
+Type: "boolean",
+},
 "filter": {
 Type: "object",
 AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
@@ -793,6 +813,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 "payloadColumn": {
 Type: "string",
 },
+"recoveryEventType": {
+Type: "string",
+},
 },
 },
 },
@@ -973,6 +996,15 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 },
 },
 },
+"usersIgnoringSecretRotation": {
+Type: "array",
+Nullable: true,
+Items: &apiextv1.JSONSchemaPropsOrArray{
+Schema: &apiextv1.JSONSchemaProps{
+Type: "string",
+},
+},
+},
 "usersWithInPlaceSecretRotation": {
 Type: "array",
 Nullable: true,
@@ -1282,9 +1314,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 "enable_cross_namespace_secret": {
 Type: "boolean",
 },
+"enable_finalizers": {
+Type: "boolean",
+},
 "enable_init_containers": {
 Type: "boolean",
 },
+"enable_persistent_volume_claim_deletion": {
+Type: "boolean",
+},
 "enable_pod_antiaffinity": {
 Type: "boolean",
 },
@@ -1388,6 +1426,36 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 "pdb_name_format": {
 Type: "string",
 },
+"pdb_master_label_selector": {
+Type: "boolean",
+},
+"persistent_volume_claim_retention_policy": {
+Type: "object",
+Properties: map[string]apiextv1.JSONSchemaProps{
+"when_deleted": {
+Type: "string",
+Enum: []apiextv1.JSON{
+{
+Raw: []byte(`"delete"`),
+},
+{
+Raw: []byte(`"retain"`),
+},
+},
+},
+"when_scaled": {
+Type: "string",
+Enum: []apiextv1.JSON{
+{
+Raw: []byte(`"delete"`),
+},
+{
+Raw: []byte(`"retain"`),
+},
+},
+},
+},
+},
 "pod_antiaffinity_preferred_during_scheduling": {
 Type: "boolean",
 },
@@ -1713,6 +1781,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 Type: "string",
 Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$",
 },
+"logical_backup_cronjob_environment_secret": {
+Type: "string",
+},
 },
 },
 "debug": {
@@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
 type MajorVersionUpgradeConfiguration struct {
 MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
 MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
-MinimalMajorVersion string `json:"minimal_major_version" default:"11"`
-TargetMajorVersion string `json:"target_major_version" default:"15"`
+MinimalMajorVersion string `json:"minimal_major_version" default:"12"`
+TargetMajorVersion string `json:"target_major_version" default:"16"`
 }

 // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
@@ -68,6 +68,7 @@ type KubernetesMetaConfiguration struct {
 AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"`
 WatchedNamespace string `json:"watched_namespace,omitempty"`
 PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
+PDBMasterLabelSelector *bool `json:"pdb_master_label_selector,omitempty"`
 EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`
 StorageResizeMode string `json:"storage_resize_mode,omitempty"`
 EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
@@ -100,8 +101,11 @@ type KubernetesMetaConfiguration struct {
 PodAntiAffinityPreferredDuringScheduling bool `json:"pod_antiaffinity_preferred_during_scheduling,omitempty"`
 PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
 PodManagementPolicy string `json:"pod_management_policy,omitempty"`
+PersistentVolumeClaimRetentionPolicy map[string]string `json:"persistent_volume_claim_retention_policy,omitempty"`
+EnablePersistentVolumeClaimDeletion *bool `json:"enable_persistent_volume_claim_deletion,omitempty"`
 EnableReadinessProbe bool `json:"enable_readiness_probe,omitempty"`
 EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
+EnableFinalizers *bool `json:"enable_finalizers,omitempty"`
 }

 // PostgresPodResourcesDefaults defines the spec of default resources
@@ -232,6 +236,7 @@ type OperatorLogicalBackupConfiguration struct {
 RetentionTime string `json:"logical_backup_s3_retention_time,omitempty"`
 GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"`
 JobPrefix string `json:"logical_backup_job_prefix,omitempty"`
+CronjobEnvironmentSecret string `json:"logical_backup_cronjob_environment_secret,omitempty"`
 CPURequest string `json:"logical_backup_cpu_request,omitempty"`
 MemoryRequest string `json:"logical_backup_memory_request,omitempty"`
 CPULimit string `json:"logical_backup_cpu_limit,omitempty"`
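For orientation only, a sketch (not part of this commit) of how the new operator options above could be set in an OperatorConfiguration resource, assuming they surface under configuration.kubernetes like the existing KubernetesMetaConfiguration fields; the values are illustrative, and when_deleted/when_scaled accept only delete or retain per the CRD validation in this diff:

    configuration:
      kubernetes:
        enable_finalizers: false
        pdb_master_label_selector: true
        enable_persistent_volume_claim_deletion: true
        persistent_volume_claim_retention_policy:
          when_deleted: retain
          when_scaled: retain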
@@ -59,6 +59,7 @@ type PostgresSpec struct {
 AllowedSourceRanges []string `json:"allowedSourceRanges"`

 Users map[string]UserFlags `json:"users,omitempty"`
+UsersIgnoringSecretRotation []string `json:"usersIgnoringSecretRotation,omitempty"`
 UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"`
 UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"`

@@ -153,8 +154,10 @@ type PostgresqlParam struct {

 // ResourceDescription describes CPU and memory resources defined for a cluster.
 type ResourceDescription struct {
-CPU string `json:"cpu"`
-Memory string `json:"memory"`
+CPU *string `json:"cpu,omitempty"`
+Memory *string `json:"memory,omitempty"`
+HugePages2Mi *string `json:"hugepages-2Mi,omitempty"`
+HugePages1Gi *string `json:"hugepages-1Gi,omitempty"`
 }

 // Resources describes requests and limits for the cluster resouces.
@@ -247,16 +250,18 @@ type ConnectionPooler struct {

 // Stream defines properties for creating FabricEventStream resources
 type Stream struct {
 ApplicationId string `json:"applicationId"`
 Database string `json:"database"`
 Tables map[string]StreamTable `json:"tables"`
 Filter map[string]*string `json:"filter,omitempty"`
 BatchSize *uint32 `json:"batchSize,omitempty"`
+EnableRecovery *bool `json:"enableRecovery,omitempty"`
 }

 // StreamTable defines properties of outbox tables for FabricEventStreams
 type StreamTable struct {
 EventType string `json:"eventType"`
+RecoveryEventType string `json:"recoveryEventType,omitempty"`
 IdColumn *string `json:"idColumn,omitempty"`
 PayloadColumn *string `json:"payloadColumn,omitempty"`
 }
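For orientation only, a sketch (not part of this commit) of a streams section in a postgresql manifest using the new recovery fields that match the CRD additions above; the application, database and table names are taken from the test data in this diff, and the recoveryEventType value is made up:

    streams:
      - applicationId: test-app
        database: test_db
        batchSize: 100
        enableRecovery: true
        tables:
          test_table:
            eventType: test-app.test
            recoveryEventType: test-app.test-dead-letter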
@@ -26,6 +26,10 @@ var parseTimeTests = []struct {
 {"expect error as minute is out of range", "23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)},
 }

+func stringToPointer(str string) *string {
+return &str
+}
+
 var parseWeekdayTests = []struct {
 about string
 in string
@@ -213,7 +217,7 @@ var unmarshalCluster = []struct {
 "127.0.0.1/32"
 ],
 "postgresql": {
-"version": "15",
+"version": "16",
 "parameters": {
 "shared_buffers": "32MB",
 "max_connections": "10",
@@ -273,7 +277,7 @@ var unmarshalCluster = []struct {
 },
 Spec: PostgresSpec{
 PostgresqlParam: PostgresqlParam{
-PgVersion: "15",
+PgVersion: "16",
 Parameters: map[string]string{
 "shared_buffers": "32MB",
 "max_connections": "10",
@@ -301,8 +305,8 @@ var unmarshalCluster = []struct {
 Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
 },
 Resources: &Resources{
-ResourceRequests: ResourceDescription{CPU: "10m", Memory: "50Mi"},
-ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"},
+ResourceRequests: ResourceDescription{CPU: stringToPointer("10m"), Memory: stringToPointer("50Mi")},
+ResourceLimits: ResourceDescription{CPU: stringToPointer("300m"), Memory: stringToPointer("3000Mi")},
 },

 TeamID: "acid",
@@ -333,7 +337,7 @@ var unmarshalCluster = []struct {
 },
 Error: "",
 },
-marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"15","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
+marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"16","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
 err: nil},
 {
 about: "example with clone",
@@ -398,7 +402,7 @@ var postgresqlList = []struct {
 out PostgresqlList
 err error
 }{
-{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"15"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
+{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"16"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
 PostgresqlList{
 TypeMeta: metav1.TypeMeta{
 Kind: "List",
@@ -419,7 +423,7 @@ var postgresqlList = []struct {
 },
 Spec: PostgresSpec{
 ClusterName: "testcluster42",
-PostgresqlParam: PostgresqlParam{PgVersion: "15"},
+PostgresqlParam: PostgresqlParam{PgVersion: "16"},
 Volume: Volume{Size: "10Gi"},
 TeamID: "acid",
 AllowedSourceRanges: []string{"185.85.220.0/22"},
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -109,7 +109,7 @@ func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
 if in.Resources != nil {
 in, out := &in.Resources, &out.Resources
 *out = new(Resources)
-**out = **in
+(*in).DeepCopyInto(*out)
 }
 return
 }
@@ -178,6 +178,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
 *out = make([]string, len(*in))
 copy(*out, *in)
 }
+if in.PDBMasterLabelSelector != nil {
+in, out := &in.PDBMasterLabelSelector, &out.PDBMasterLabelSelector
+*out = new(bool)
+**out = **in
+}
 if in.EnablePodDisruptionBudget != nil {
 in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget
 *out = new(bool)
@@ -260,6 +265,23 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
 }
 }
 out.PodEnvironmentConfigMap = in.PodEnvironmentConfigMap
+if in.PersistentVolumeClaimRetentionPolicy != nil {
+in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
+*out = make(map[string]string, len(*in))
+for key, val := range *in {
+(*out)[key] = val
+}
+}
+if in.EnablePersistentVolumeClaimDeletion != nil {
+in, out := &in.EnablePersistentVolumeClaimDeletion, &out.EnablePersistentVolumeClaimDeletion
+*out = new(bool)
+**out = **in
+}
+if in.EnableFinalizers != nil {
+in, out := &in.EnableFinalizers, &out.EnableFinalizers
+*out = new(bool)
+**out = **in
+}
 return
 }

@@ -619,7 +641,7 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 if in.Resources != nil {
 in, out := &in.Resources, &out.Resources
 *out = new(Resources)
-**out = **in
+(*in).DeepCopyInto(*out)
 }
 if in.EnableConnectionPooler != nil {
 in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler
@@ -701,6 +723,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 (*out)[key] = outVal
 }
 }
+if in.UsersIgnoringSecretRotation != nil {
+in, out := &in.UsersIgnoringSecretRotation, &out.UsersIgnoringSecretRotation
+*out = make([]string, len(*in))
+copy(*out, *in)
+}
 if in.UsersWithSecretRotation != nil {
 in, out := &in.UsersWithSecretRotation, &out.UsersWithSecretRotation
 *out = make([]string, len(*in))
@@ -1148,6 +1175,26 @@ func (in *PreparedSchema) DeepCopy() *PreparedSchema {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
 *out = *in
+if in.CPU != nil {
+in, out := &in.CPU, &out.CPU
+*out = new(string)
+**out = **in
+}
+if in.Memory != nil {
+in, out := &in.Memory, &out.Memory
+*out = new(string)
+**out = **in
+}
+if in.HugePages2Mi != nil {
+in, out := &in.HugePages2Mi, &out.HugePages2Mi
+*out = new(string)
+**out = **in
+}
+if in.HugePages1Gi != nil {
+in, out := &in.HugePages1Gi, &out.HugePages1Gi
+*out = new(string)
+**out = **in
+}
 return
 }

@@ -1164,8 +1211,8 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Resources) DeepCopyInto(out *Resources) {
 *out = *in
-out.ResourceRequests = in.ResourceRequests
-out.ResourceLimits = in.ResourceLimits
+in.ResourceRequests.DeepCopyInto(&out.ResourceRequests)
+in.ResourceLimits.DeepCopyInto(&out.ResourceLimits)
 return
 }

@@ -1201,7 +1248,7 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
 if in.Resources != nil {
 in, out := &in.Resources, &out.Resources
 *out = new(Resources)
-**out = **in
+(*in).DeepCopyInto(*out)
 }
 if in.Ports != nil {
 in, out := &in.Ports, &out.Ports
@@ -1274,6 +1321,11 @@ func (in *Stream) DeepCopyInto(out *Stream) {
 *out = new(uint32)
 **out = **in
 }
+if in.EnableRecovery != nil {
+in, out := &in.EnableRecovery, &out.EnableRecovery
+*out = new(bool)
+**out = **in
+}
 return
 }
@@ -33,9 +33,10 @@ type FabricEventStreamList struct {

 // EventStream defines the source, flow and sink of the event stream
 type EventStream struct {
 EventStreamFlow EventStreamFlow `json:"flow"`
 EventStreamSink EventStreamSink `json:"sink"`
 EventStreamSource EventStreamSource `json:"source"`
+EventStreamRecovery EventStreamRecovery `json:"recovery"`
 }

 // EventStreamFlow defines the flow characteristics of the event stream
@@ -51,6 +52,12 @@ type EventStreamSink struct {
 MaxBatchSize *uint32 `json:"maxBatchSize,omitempty"`
 }

+// EventStreamRecovery defines the target of dead letter queue
+type EventStreamRecovery struct {
+Type string `json:"type"`
+Sink *EventStreamSink `json:"sink"`
+}
+
 // EventStreamSource defines the source of the event stream and connection for FES operator
 type EventStreamSource struct {
 Type string `json:"type"`
@@ -33,6 +33,12 @@ import (
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Connection) DeepCopyInto(out *Connection) {
 *out = *in
+if in.PublicationName != nil {
+in, out := &in.PublicationName, &out.PublicationName
+*out = new(string)
+**out = **in
+}
+in.DBAuth.DeepCopyInto(&out.DBAuth)
 return
 }

@@ -65,6 +71,10 @@ func (in *DBAuth) DeepCopy() *DBAuth {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStream) DeepCopyInto(out *EventStream) {
 *out = *in
+in.EventStreamFlow.DeepCopyInto(&out.EventStreamFlow)
+in.EventStreamRecovery.DeepCopyInto(&out.EventStreamRecovery)
+in.EventStreamSink.DeepCopyInto(&out.EventStreamSink)
+in.EventStreamSource.DeepCopyInto(&out.EventStreamSource)
 return
 }

@@ -81,6 +91,11 @@ func (in *EventStream) DeepCopy() *EventStream {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStreamFlow) DeepCopyInto(out *EventStreamFlow) {
 *out = *in
+if in.PayloadColumn != nil {
+in, out := &in.PayloadColumn, &out.PayloadColumn
+*out = new(string)
+**out = **in
+}
 return
 }

@@ -94,9 +109,35 @@ func (in *EventStreamFlow) DeepCopy() *EventStreamFlow {
 return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventStreamRecovery) DeepCopyInto(out *EventStreamRecovery) {
+*out = *in
+if in.Sink != nil {
+in, out := &in.Sink, &out.Sink
+*out = new(EventStreamSink)
+(*in).DeepCopyInto(*out)
+}
+return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventStreamRecovery) DeepCopy() *EventStreamRecovery {
+if in == nil {
+return nil
+}
+out := new(EventStreamRecovery)
+in.DeepCopyInto(out)
+return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStreamSink) DeepCopyInto(out *EventStreamSink) {
 *out = *in
+if in.MaxBatchSize != nil {
+in, out := &in.MaxBatchSize, &out.MaxBatchSize
+*out = new(uint32)
+**out = **in
+}
 return
 }

@@ -113,6 +154,13 @@ func (in *EventStreamSink) DeepCopy() *EventStreamSink {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStreamSource) DeepCopyInto(out *EventStreamSource) {
 *out = *in
+in.Connection.DeepCopyInto(&out.Connection)
+if in.Filter != nil {
+in, out := &in.Filter, &out.Filter
+*out = new(string)
+**out = **in
+}
+in.EventStreamTable.DeepCopyInto(&out.EventStreamTable)
 return
 }

@@ -129,6 +177,11 @@ func (in *EventStreamSource) DeepCopy() *EventStreamSource {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStreamTable) DeepCopyInto(out *EventStreamTable) {
 *out = *in
+if in.IDColumn != nil {
+in, out := &in.IDColumn, &out.IDColumn
+*out = new(string)
+**out = **in
+}
 return
 }
@@ -44,6 +44,7 @@ var (
 databaseNameRegexp = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 userRegexp = regexp.MustCompile(`^[a-z0-9]([-_a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-_a-z0-9]*[a-z0-9])?)*$`)
 patroniObjectSuffixes = []string{"leader", "config", "sync", "failover"}
+finalizerName = "postgres-operator.acid.zalan.do"
 )

 // Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication.
@@ -246,20 +247,42 @@ func (c *Cluster) Create() (err error) {
 defer c.mu.Unlock()

 var (
-service *v1.Service
-ep *v1.Endpoints
-ss *appsv1.StatefulSet
+pgCreateStatus *acidv1.Postgresql
+service *v1.Service
+ep *v1.Endpoints
+ss *appsv1.StatefulSet
 )

 defer func() {
+var (
+pgUpdatedStatus *acidv1.Postgresql
+errStatus error
+)
 if err == nil {
-c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
+pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
 } else {
-c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
+c.logger.Warningf("cluster created failed: %v", err)
+pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
+}
+if errStatus != nil {
+c.logger.Warningf("could not set cluster status: %v", errStatus)
+}
+if pgUpdatedStatus != nil {
+c.setSpec(pgUpdatedStatus)
 }
 }()

-c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
+pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
+if err != nil {
+return fmt.Errorf("could not set cluster status: %v", err)
+}
+c.setSpec(pgCreateStatus)

+if c.OpConfig.EnableFinalizers != nil && *c.OpConfig.EnableFinalizers {
+if err = c.addFinalizer(); err != nil {
+return fmt.Errorf("could not add finalizer: %v", err)
+}
+}
 c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")

 for _, role := range []PostgresRole{Master, Replica} {
@@ -409,6 +432,12 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 reasons = append(reasons, "new statefulset's pod management policy do not match")
 }

+if !reflect.DeepEqual(c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy, statefulSet.Spec.PersistentVolumeClaimRetentionPolicy) {
+match = false
+needsReplace = true
+reasons = append(reasons, "new statefulset's persistent volume claim retention policy do not match")
+}
+
 needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
 needsRollUpdate, reasons = c.compareContainers("containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons)

@@ -474,23 +503,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) {
 needsReplace = true
 reasons = append(reasons, "new statefulset's volumeClaimTemplates contains different number of volumes to the old one")
-}
+} else {
 for i := 0; i < len(c.Statefulset.Spec.VolumeClaimTemplates); i++ {
-name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
-// Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta
-if name != statefulSet.Spec.VolumeClaimTemplates[i].Name {
-needsReplace = true
-reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
-continue
-}
-if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) {
-needsReplace = true
-reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name))
-}
-if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) {
 name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
-needsReplace = true
-reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name))
+// Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta
+if name != statefulSet.Spec.VolumeClaimTemplates[i].Name {
+needsReplace = true
+reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
+continue
+}
+if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) {
+needsReplace = true
+reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name))
+}
+if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) {
+name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
+needsReplace = true
+reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name))
+}
 }
 }

@@ -756,6 +786,54 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
 return true, ""
 }

+// addFinalizer patches the postgresql CR to add finalizer
+func (c *Cluster) addFinalizer() error {
+if c.hasFinalizer() {
+return nil
+}
+
+c.logger.Infof("adding finalizer %s", finalizerName)
+finalizers := append(c.ObjectMeta.Finalizers, finalizerName)
+newSpec, err := c.KubeClient.SetFinalizer(c.clusterName(), c.DeepCopy(), finalizers)
+if err != nil {
+return fmt.Errorf("error adding finalizer: %v", err)
+}
+
+// update the spec, maintaining the new resourceVersion
+c.setSpec(newSpec)
+
+return nil
+}
+
+// removeFinalizer patches postgresql CR to remove finalizer
+func (c *Cluster) removeFinalizer() error {
+if !c.hasFinalizer() {
+return nil
+}
+
+c.logger.Infof("removing finalizer %s", finalizerName)
+finalizers := util.RemoveString(c.ObjectMeta.Finalizers, finalizerName)
+newSpec, err := c.KubeClient.SetFinalizer(c.clusterName(), c.DeepCopy(), finalizers)
+if err != nil {
+return fmt.Errorf("error removing finalizer: %v", err)
+}
+
+// update the spec, maintaining the new resourceVersion.
+c.setSpec(newSpec)
+
+return nil
+}
+
+// hasFinalizer checks if finalizer is currently set or not
+func (c *Cluster) hasFinalizer() bool {
+for _, finalizer := range c.ObjectMeta.Finalizers {
+if finalizer == finalizerName {
+return true
+}
+}
+return false
+}
+
 // Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object
 // (i.e. service) is treated as an error
 // logical backup cron jobs are an exception: a user-initiated Update can enable a logical backup job
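For orientation only, a sketch (not part of this commit) of what the custom resource metadata would carry once the finalizer defined above is registered by the operator; the cluster name is just an example:

    apiVersion: acid.zalan.do/v1
    kind: Postgresql
    metadata:
      name: acid-minimal-cluster
      finalizers:
        - postgres-operator.acid.zalan.do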
@@ -772,10 +850,20 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 c.setSpec(newSpec)

 defer func() {
+var (
+pgUpdatedStatus *acidv1.Postgresql
+err error
+)
 if updateFailed {
-c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
+pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
 } else {
-c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+}
+if err != nil {
+c.logger.Warningf("could not set cluster status: %v", err)
+}
+if pgUpdatedStatus != nil {
+c.setSpec(pgUpdatedStatus)
 }
 }()

@@ -873,6 +961,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 }
 }()

+// add or remove standby_cluster section from Patroni config depending on changes in standby section
+if !reflect.DeepEqual(oldSpec.Spec.StandbyCluster, newSpec.Spec.StandbyCluster) {
+if err := c.syncStandbyClusterConfiguration(); err != nil {
+return fmt.Errorf("could not set StandbyCluster configuration options: %v", err)
+}
+}
+
 // pod disruption budget
 if oldSpec.Spec.NumberOfInstances != newSpec.Spec.NumberOfInstances {
 c.logger.Debug("syncing pod disruption budgets")
@@ -991,48 +1086,65 @@ func syncResources(a, b *v1.ResourceRequirements) bool {
 // DCS, reuses the master's endpoint to store the leader related metadata. If we remove the endpoint
 // before the pods, it will be re-created by the current master pod and will remain, obstructing the
 // creation of the new cluster with the same name. Therefore, the endpoints should be deleted last.
-func (c *Cluster) Delete() {
+func (c *Cluster) Delete() error {
+var anyErrors = false
 c.mu.Lock()
 defer c.mu.Unlock()
-c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of new cluster resources")
+c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of cluster resources")

 if err := c.deleteStreams(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete event streams: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete event streams: %v", err)
 }

 // delete the backup job before the stateful set of the cluster to prevent connections to non-existing pods
 // deleting the cron job also removes pods and batch jobs it created
 if err := c.deleteLogicalBackupJob(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not remove the logical backup k8s cron job; %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove the logical backup k8s cron job; %v", err)
 }

 if err := c.deleteStatefulSet(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete statefulset: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete statefulset: %v", err)
 }

 if err := c.deleteSecrets(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete secrets: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete secrets: %v", err)
 }

 if err := c.deletePodDisruptionBudget(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete pod disruption budget: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err)
 }

 for _, role := range []PostgresRole{Master, Replica} {

 if !c.patroniKubernetesUseConfigMaps() {
 if err := c.deleteEndpoint(role); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete %s endpoint: %v", role, err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete %s endpoint: %v", role, err)
 }
 }

 if err := c.deleteService(role); err != nil {
+anyErrors = true
 c.logger.Warningf("could not delete %s service: %v", role, err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete %s service: %v", role, err)
 }
 }

 if err := c.deletePatroniClusterObjects(); err != nil {
+anyErrors = true
 c.logger.Warningf("could not remove leftover patroni objects; %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove leftover patroni objects; %v", err)
 }

 // Delete connection pooler objects anyway, even if it's not mentioned in the
@@ -1040,10 +1152,22 @@ func (c *Cluster) Delete() {
 // wrong
 for _, role := range [2]PostgresRole{Master, Replica} {
 if err := c.deleteConnectionPooler(role); err != nil {
+anyErrors = true
 c.logger.Warningf("could not remove connection pooler: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove connection pooler: %v", err)
 }
 }

+// If we are done deleting our various resources we remove the finalizer to let K8S finally delete the Postgres CR
+if anyErrors {
+c.eventRecorder.Event(c.GetReference(), v1.EventTypeWarning, "Delete", "some resources could be successfully deleted yet")
+return fmt.Errorf("some error(s) occured when deleting resources, NOT removing finalizer yet")
+}
+if err := c.removeFinalizer(); err != nil {
+return fmt.Errorf("done cleaning up, but error when removing finalizer: %v", err)
+}
+
+return nil
 }

 // NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status).
@@ -1061,7 +1185,7 @@ func (c *Cluster) ReceivePodEvent(event PodEvent) {
 }
 }

-func (c *Cluster) processPodEvent(obj interface{}) error {
+func (c *Cluster) processPodEvent(obj interface{}, isInInitialList bool) error {
 event, ok := obj.(PodEvent)
 if !ok {
 return fmt.Errorf("could not cast to PodEvent")
|
||||||
package cluster
|
package cluster
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
@ -26,18 +28,23 @@ import (
|
||||||
const (
|
const (
|
||||||
superUserName = "postgres"
|
superUserName = "postgres"
|
||||||
replicationUserName = "standby"
|
replicationUserName = "standby"
|
||||||
exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
|
poolerUserName = "pooler"
|
||||||
spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
|
adminUserName = "admin"
|
||||||
|
exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
|
||||||
|
spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`
|
||||||
)
|
)
|
||||||
|
|
||||||
var logger = logrus.New().WithField("test", "cluster")
|
var logger = logrus.New().WithField("test", "cluster")
|
||||||
var eventRecorder = record.NewFakeRecorder(1)
|
|
||||||
|
// eventRecorder needs buffer for TestCreate which emit events for
|
||||||
|
// 1 cluster, primary endpoint, 2 services, the secrets, the statefulset and pods being ready
|
||||||
|
var eventRecorder = record.NewFakeRecorder(7)
|
||||||
|
|
||||||
var cl = New(
|
var cl = New(
|
||||||
Config{
|
Config{
|
||||||
OpConfig: config.Config{
|
OpConfig: config.Config{
|
||||||
PodManagementPolicy: "ordered_ready",
|
PodManagementPolicy: "ordered_ready",
|
||||||
ProtectedRoles: []string{"admin", "cron_admin", "part_man"},
|
ProtectedRoles: []string{adminUserName, "cron_admin", "part_man"},
|
||||||
Auth: config.Auth{
|
Auth: config.Auth{
|
||||||
SuperUsername: superUserName,
|
SuperUsername: superUserName,
|
||||||
ReplicationUsername: replicationUserName,
|
ReplicationUsername: replicationUserName,
|
||||||
|
|
@ -46,6 +53,9 @@ var cl = New(
|
||||||
Resources: config.Resources{
|
Resources: config.Resources{
|
||||||
DownscalerAnnotations: []string{"downscaler/*"},
|
DownscalerAnnotations: []string{"downscaler/*"},
|
||||||
},
|
},
|
||||||
|
ConnectionPooler: config.ConnectionPooler{
|
||||||
|
User: poolerUserName,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
k8sutil.NewMockKubernetesClient(),
|
k8sutil.NewMockKubernetesClient(),
|
||||||
|
|
@ -55,17 +65,104 @@ var cl = New(
|
||||||
Namespace: "test",
|
Namespace: "test",
|
||||||
Annotations: map[string]string{"downscaler/downtime_replicas": "0"},
|
Annotations: map[string]string{"downscaler/downtime_replicas": "0"},
|
||||||
},
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
EnableConnectionPooler: util.True(),
|
||||||
|
Streams: []acidv1.Stream{
|
||||||
|
acidv1.Stream{
|
||||||
|
ApplicationId: "test-app",
|
||||||
|
Database: "test_db",
|
||||||
|
Tables: map[string]acidv1.StreamTable{
|
||||||
|
"test_table": acidv1.StreamTable{
|
||||||
|
EventType: "test-app.test",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
logger,
|
logger,
|
||||||
eventRecorder,
|
eventRecorder,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestCreate(t *testing.T) {
|
||||||
|
clientSet := fake.NewSimpleClientset()
|
||||||
|
acidClientSet := fakeacidv1.NewSimpleClientset()
|
||||||
|
clusterName := "cluster-with-finalizer"
|
||||||
|
clusterNamespace := "test"
|
||||||
|
|
||||||
|
client := k8sutil.KubernetesClient{
|
||||||
|
DeploymentsGetter: clientSet.AppsV1(),
|
||||||
|
EndpointsGetter: clientSet.CoreV1(),
|
||||||
|
PersistentVolumeClaimsGetter: clientSet.CoreV1(),
|
||||||
|
PodDisruptionBudgetsGetter: clientSet.PolicyV1(),
|
||||||
|
PodsGetter: clientSet.CoreV1(),
|
||||||
|
PostgresqlsGetter: acidClientSet.AcidV1(),
|
||||||
|
ServicesGetter: clientSet.CoreV1(),
|
||||||
|
SecretsGetter: clientSet.CoreV1(),
|
||||||
|
StatefulSetsGetter: clientSet.AppsV1(),
|
||||||
|
}
|
||||||
|
|
||||||
|
pg := acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: clusterNamespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1Gi",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pod := v1.Pod{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: fmt.Sprintf("%s-0", clusterName),
|
||||||
|
Namespace: clusterNamespace,
|
||||||
|
Labels: map[string]string{
|
||||||
|
"application": "spilo",
|
||||||
|
"cluster-name": clusterName,
|
||||||
|
"spilo-role": "master",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// manually create resources which must be found by further API calls and are not created by cluster.Create()
|
||||||
|
client.Postgresqls(clusterNamespace).Create(context.TODO(), &pg, metav1.CreateOptions{})
|
||||||
|
client.Pods(clusterNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||||
|
|
||||||
|
var cluster = New(
|
||||||
|
Config{
|
||||||
|
OpConfig: config.Config{
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
Resources: config.Resources{
|
||||||
|
ClusterLabels: map[string]string{"application": "spilo"},
|
||||||
|
ClusterNameLabel: "cluster-name",
|
||||||
|
DefaultCPURequest: "300m",
|
||||||
|
DefaultCPULimit: "300m",
|
||||||
|
DefaultMemoryRequest: "300Mi",
|
||||||
|
DefaultMemoryLimit: "300Mi",
|
||||||
|
PodRoleLabel: "spilo-role",
|
||||||
|
ResourceCheckInterval: time.Duration(3),
|
||||||
|
ResourceCheckTimeout: time.Duration(10),
|
||||||
|
},
|
||||||
|
EnableFinalizers: util.True(),
|
||||||
|
},
|
||||||
|
}, client, pg, logger, eventRecorder)
|
||||||
|
|
||||||
|
err := cluster.Create()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
if !cluster.hasFinalizer() {
|
||||||
|
t.Errorf("%s - expected finalizer not found on cluster", t.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
 func TestStatefulSetAnnotations(t *testing.T) {
 	spec := acidv1.PostgresSpec{
 		TeamID: "myapp", NumberOfInstances: 1,
 		Resources: &acidv1.Resources{
-			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
-			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
 		},
 		Volume: acidv1.Volume{
 			Size: "1G",
@@ -87,8 +184,8 @@ func TestStatefulSetUpdateWithEnv(t *testing.T) {
 	oldSpec := &acidv1.PostgresSpec{
 		TeamID: "myapp", NumberOfInstances: 1,
 		Resources: &acidv1.Resources{
-			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
-			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
 		},
 		Volume: acidv1.Volume{
 			Size: "1G",
@@ -127,56 +224,85 @@ func TestStatefulSetUpdateWithEnv(t *testing.T) {
 
 func TestInitRobotUsers(t *testing.T) {
 	tests := []struct {
+		testCase      string
 		manifestUsers map[string]acidv1.UserFlags
 		infraRoles    map[string]spec.PgUser
 		result        map[string]spec.PgUser
 		err           error
 	}{
 		{
+			testCase:      "manifest user called like infrastructure role - latter should take percedence",
 			manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}},
 			infraRoles:    map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}},
 			result:        map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}},
 			err:           nil,
 		},
 		{
+			testCase:      "manifest user with forbidden characters",
 			manifestUsers: map[string]acidv1.UserFlags{"!fooBar": {"superuser", "createdb"}},
 			err:           fmt.Errorf(`invalid username: "!fooBar"`),
 		},
 		{
+			testCase:      "manifest user with unknown privileges (should be catched by CRD, too)",
 			manifestUsers: map[string]acidv1.UserFlags{"foobar": {"!superuser", "createdb"}},
 			err: fmt.Errorf(`invalid flags for user "foobar": ` +
 				`user flag "!superuser" is not alphanumeric`),
 		},
 		{
+			testCase:      "manifest user with unknown privileges - part 2 (should be catched by CRD, too)",
 			manifestUsers: map[string]acidv1.UserFlags{"foobar": {"superuser1", "createdb"}},
 			err: fmt.Errorf(`invalid flags for user "foobar": ` +
 				`user flag "SUPERUSER1" is not valid`),
 		},
 		{
+			testCase:      "manifest user with conflicting flags",
 			manifestUsers: map[string]acidv1.UserFlags{"foobar": {"inherit", "noinherit"}},
 			err: fmt.Errorf(`invalid flags for user "foobar": ` +
 				`conflicting user flags: "NOINHERIT" and "INHERIT"`),
 		},
 		{
-			manifestUsers: map[string]acidv1.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}},
+			testCase:      "manifest user called like Spilo system users",
+			manifestUsers: map[string]acidv1.UserFlags{superUserName: {"createdb"}, replicationUserName: {"replication"}},
+			infraRoles:    map[string]spec.PgUser{},
+			result:        map[string]spec.PgUser{},
+			err:           nil,
+		},
+		{
+			testCase:      "manifest user called like protected user name",
+			manifestUsers: map[string]acidv1.UserFlags{adminUserName: {"superuser"}},
+			infraRoles:    map[string]spec.PgUser{},
+			result:        map[string]spec.PgUser{},
+			err:           nil,
+		},
+		{
+			testCase:      "manifest user called like pooler system user",
+			manifestUsers: map[string]acidv1.UserFlags{poolerUserName: {}},
+			infraRoles:    map[string]spec.PgUser{},
+			result:        map[string]spec.PgUser{},
+			err:           nil,
+		},
+		{
+			testCase:      "manifest user called like stream system user",
+			manifestUsers: map[string]acidv1.UserFlags{"fes_user": {"replication"}},
 			infraRoles:    map[string]spec.PgUser{},
 			result:        map[string]spec.PgUser{},
 			err:           nil,
 		},
 	}
+	cl.initSystemUsers()
 	for _, tt := range tests {
 		cl.Spec.Users = tt.manifestUsers
 		cl.pgUsers = tt.infraRoles
 		if err := cl.initRobotUsers(); err != nil {
 			if tt.err == nil {
-				t.Errorf("%s got an unexpected error: %v", t.Name(), err)
+				t.Errorf("%s - %s: got an unexpected error: %v", tt.testCase, t.Name(), err)
 			}
 			if err.Error() != tt.err.Error() {
-				t.Errorf("%s expected error %v, got %v", t.Name(), tt.err, err)
+				t.Errorf("%s - %s: expected error %v, got %v", tt.testCase, t.Name(), tt.err, err)
 			}
 		} else {
 			if !reflect.DeepEqual(cl.pgUsers, tt.result) {
-				t.Errorf("%s expected: %#v, got %#v", t.Name(), tt.result, cl.pgUsers)
+				t.Errorf("%s - %s: expected: %#v, got %#v", tt.testCase, t.Name(), tt.result, cl.pgUsers)
 			}
 		}
 	}
 }
@@ -269,7 +395,7 @@ func TestInitHumanUsers(t *testing.T) {
 		},
 		{
 			existingRoles: map[string]spec.PgUser{},
-			teamRoles:     []string{"admin", replicationUserName},
+			teamRoles:     []string{adminUserName, replicationUserName},
 			result:        map[string]spec.PgUser{},
 			err:           nil,
 		},
@@ -896,6 +1022,11 @@ func TestServiceAnnotations(t *testing.T) {
 }
 
 func TestInitSystemUsers(t *testing.T) {
+	// reset system users, pooler and stream section
+	cl.systemUsers = make(map[string]spec.PgUser)
+	cl.Spec.EnableConnectionPooler = boolToPointer(false)
+	cl.Spec.Streams = []acidv1.Stream{}
+
 	// default cluster without connection pooler and event streams
 	cl.initSystemUsers()
 	if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist {
@@ -914,35 +1045,35 @@ func TestInitSystemUsers(t *testing.T) {
 
 	// superuser is not allowed as connection pool user
 	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
-		User: "postgres",
+		User: superUserName,
 	}
-	cl.OpConfig.SuperUsername = "postgres"
-	cl.OpConfig.ConnectionPooler.User = "pooler"
+	cl.OpConfig.SuperUsername = superUserName
+	cl.OpConfig.ConnectionPooler.User = poolerUserName
 
 	cl.initSystemUsers()
-	if _, exist := cl.systemUsers["pooler"]; !exist {
+	if _, exist := cl.systemUsers[poolerUserName]; !exist {
 		t.Errorf("%s, Superuser is not allowed to be a connection pool user", t.Name())
 	}
 
 	// neither protected users are
-	delete(cl.systemUsers, "pooler")
+	delete(cl.systemUsers, poolerUserName)
 	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
-		User: "admin",
+		User: adminUserName,
 	}
-	cl.OpConfig.ProtectedRoles = []string{"admin"}
+	cl.OpConfig.ProtectedRoles = []string{adminUserName}
 
 	cl.initSystemUsers()
-	if _, exist := cl.systemUsers["pooler"]; !exist {
+	if _, exist := cl.systemUsers[poolerUserName]; !exist {
 		t.Errorf("%s, Protected user are not allowed to be a connection pool user", t.Name())
 	}
 
-	delete(cl.systemUsers, "pooler")
+	delete(cl.systemUsers, poolerUserName)
 	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
-		User: "standby",
+		User: replicationUserName,
 	}
 
 	cl.initSystemUsers()
-	if _, exist := cl.systemUsers["pooler"]; !exist {
+	if _, exist := cl.systemUsers[poolerUserName]; !exist {
 		t.Errorf("%s, System users are not allowed to be a connection pool user", t.Name())
 	}
 
@@ -960,8 +1091,8 @@ func TestInitSystemUsers(t *testing.T) {
 			ApplicationId: "test-app",
 			Database:      "test_db",
 			Tables: map[string]acidv1.StreamTable{
-				"data.test_table": {
-					EventType: "test_event",
+				"test_table": {
+					EventType: "test-app.test",
 				},
 			},
 		},
@@ -1017,7 +1148,7 @@ func TestPreparedDatabases(t *testing.T) {
 			subTest:  "Test admin role of owner",
 			role:     "foo_owner",
 			memberOf: "",
-			admin:    "admin",
+			admin:    adminUserName,
 		},
 		{
 			subTest: "Test writer is a member of reader",
@@ -1062,17 +1193,13 @@ func TestCompareSpiloConfiguration(t *testing.T) {
 		ExpectedResult bool
 	}{
 		{
-			`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
+			`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
 			true,
 		},
 		{
-			`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
+			`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
 			true,
 		},
-		{
-			`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
-			false,
-		},
 		{
 			`{}`,
 			false,
@@ -610,7 +610,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 		Delete(context.TODO(), deployment.Name, options)
 
 	if k8sutil.ResourceNotFound(err) {
-		c.logger.Debugf("connection pooler deployment was already deleted")
+		c.logger.Debugf("connection pooler deployment %s for role %s has already been deleted", deployment.Name, role)
 	} else if err != nil {
 		return fmt.Errorf("could not delete connection pooler deployment: %v", err)
 	}
@@ -629,7 +629,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 		Delete(context.TODO(), service.Name, options)
 
 	if k8sutil.ResourceNotFound(err) {
-		c.logger.Debugf("connection pooler service was already deleted")
+		c.logger.Debugf("connection pooler service %s for role %s has already been already deleted", service.Name, role)
 	} else if err != nil {
 		return fmt.Errorf("could not delete connection pooler service: %v", err)
 	}
@@ -821,12 +821,12 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.
 func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources {
 
 	defaultRequests := acidv1.ResourceDescription{
-		CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
-		Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
+		CPU:    &config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
+		Memory: &config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
 	}
 	defaultLimits := acidv1.ResourceDescription{
-		CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
-		Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
+		CPU:    &config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
+		Memory: &config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
 	}
 
 	return acidv1.Resources{
@@ -2,7 +2,6 @@ package cluster
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"strings"
 	"testing"
@@ -711,47 +710,42 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 	noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil }
 
 	tests := []struct {
 		subTest  string
 		spec     *acidv1.PostgresSpec
-		expected error
 		cluster  *Cluster
 		check    func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error
 	}{
 		{
 			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
-			expected: nil,
 			cluster:  cluster,
 			check:    noCheck,
 		},
 		{
 			subTest: "pooler uses pod service account",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
-			expected: nil,
 			cluster:  cluster,
 			check:    testServiceAccount,
 		},
 		{
 			subTest: "no default resources",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
-			expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`),
 			cluster:  clusterNoDefaultRes,
 			check:    noCheck,
 		},
 		{
 			subTest: "default resources are set",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
-			expected: nil,
 			cluster:  cluster,
 			check:    testResources,
 		},
 		{
 			subTest: "labels for service",
@@ -759,30 +753,23 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 				ConnectionPooler:              &acidv1.ConnectionPooler{},
 				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
-			expected: nil,
 			cluster:  cluster,
 			check:    testLabels,
 		},
 		{
 			subTest: "required envs",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
-			expected: nil,
 			cluster:  cluster,
 			check:    testEnvs,
 		},
 	}
 	for _, role := range [2]PostgresRole{Master, Replica} {
 		for _, tt := range tests {
-			podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role)
+			podSpec, _ := tt.cluster.generateConnectionPoolerPodTemplate(role)
 
-			if err != tt.expected && err.Error() != tt.expected.Error() {
-				t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
-					testName, tt.subTest, err, tt.expected)
-			}
-
-			err = tt.check(cluster, podSpec, role)
+			err := tt.check(cluster, podSpec, role)
 			if err != nil {
 				t.Errorf("%s [%s]: Pod spec is incorrect, %+v",
 					testName, tt.subTest, err)
@@ -973,8 +960,8 @@ func TestPoolerTLS(t *testing.T) {
 		TeamID: "myapp", NumberOfInstances: 1,
 		EnableConnectionPooler: util.True(),
 		Resources: &acidv1.Resources{
-			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
-			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
 		},
 		Volume: acidv1.Volume{
 			Size: "1G",
@@ -205,7 +205,11 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser
 	}
 	defer func() {
 		if err2 := rows.Close(); err2 != nil {
-			err = fmt.Errorf("error when closing query cursor: %v", err2)
+			if err != nil {
+				err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
+			} else {
+				err = fmt.Errorf("error when closing query cursor: %v", err2)
+			}
 		}
 	}()
 
@@ -252,7 +256,11 @@ func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string
 	}
 	defer func() {
 		if err2 := rows.Close(); err2 != nil {
-			err = fmt.Errorf("error when closing query cursor: %v", err2)
+			if err != nil {
+				err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
+			} else {
+				err = fmt.Errorf("error when closing query cursor: %v", err2)
+			}
 		}
 	}()
 
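The two hunks above change the deferred rows.Close() handler so that a Close error no longer silently overwrites an earlier query error. A minimal, self-contained sketch of the same pattern using only the standard library; the query, table name and package name are placeholders, not taken from the operator:

    package dbutil

    import (
        "database/sql"
        "fmt"
    )

    // queryNames shows the named-return pattern: the deferred Close handler
    // can wrap an earlier error instead of replacing it.
    func queryNames(db *sql.DB) (names []string, err error) {
        rows, err := db.Query("SELECT name FROM example") // placeholder query
        if err != nil {
            return nil, err
        }
        defer func() {
            if err2 := rows.Close(); err2 != nil {
                if err != nil {
                    // keep both the original error and the Close error
                    err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
                } else {
                    err = fmt.Errorf("error when closing query cursor: %v", err2)
                }
            }
        }()

        for rows.Next() {
            var name string
            if err = rows.Scan(&name); err != nil {
                return nil, err
            }
            names = append(names, name)
        }
        return names, rows.Err()
    }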
@@ -20,6 +20,11 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 
+	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"
+	batchv1 "k8s.io/api/batch/v1"
+	"k8s.io/apimachinery/pkg/labels"
+
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
@@ -28,9 +33,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/patroni"
 	"github.com/zalando/postgres-operator/pkg/util/retryutil"
-	"golang.org/x/exp/maps"
-	batchv1 "k8s.io/api/batch/v1"
-	"k8s.io/apimachinery/pkg/labels"
 )
 
 const (
@@ -64,9 +66,8 @@ type patroniDCS struct {
 }
 
 type pgBootstrap struct {
 	Initdb []interface{}     `json:"initdb"`
-	Users  map[string]pgUser `json:"users"`
 	DCS    patroniDCS        `json:"dcs,omitempty"`
 }
 
 type spiloConfiguration struct {
@@ -126,12 +127,12 @@ func (c *Cluster) podDisruptionBudgetName() string {
 func makeDefaultResources(config *config.Config) acidv1.Resources {
 
 	defaultRequests := acidv1.ResourceDescription{
-		CPU:    config.Resources.DefaultCPURequest,
-		Memory: config.Resources.DefaultMemoryRequest,
+		CPU:    &config.Resources.DefaultCPURequest,
+		Memory: &config.Resources.DefaultMemoryRequest,
 	}
 	defaultLimits := acidv1.ResourceDescription{
-		CPU:    config.Resources.DefaultCPULimit,
-		Memory: config.Resources.DefaultMemoryLimit,
+		CPU:    &config.Resources.DefaultCPULimit,
+		Memory: &config.Resources.DefaultMemoryLimit,
 	}
 
 	return acidv1.Resources{
@@ -143,12 +144,12 @@ func makeDefaultResources(config *config.Config) acidv1.Resources {
 func makeLogicalBackupResources(config *config.Config) acidv1.Resources {
 
 	logicalBackupResourceRequests := acidv1.ResourceDescription{
-		CPU:    config.LogicalBackup.LogicalBackupCPURequest,
-		Memory: config.LogicalBackup.LogicalBackupMemoryRequest,
+		CPU:    &config.LogicalBackup.LogicalBackupCPURequest,
+		Memory: &config.LogicalBackup.LogicalBackupMemoryRequest,
 	}
 	logicalBackupResourceLimits := acidv1.ResourceDescription{
-		CPU:    config.LogicalBackup.LogicalBackupCPULimit,
-		Memory: config.LogicalBackup.LogicalBackupMemoryLimit,
+		CPU:    &config.LogicalBackup.LogicalBackupCPULimit,
+		Memory: &config.LogicalBackup.LogicalBackupMemoryLimit,
 	}
 
 	return acidv1.Resources{
@@ -214,7 +215,9 @@ func (c *Cluster) enforceMaxResourceRequests(resources *v1.ResourceRequirements)
 			return fmt.Errorf("could not compare defined CPU request %s for %q container with configured maximum value %s: %v",
 				cpuRequest.String(), constants.PostgresContainerName, maxCPURequest, err)
 		}
-		resources.Requests[v1.ResourceCPU] = maxCPU
+		if !maxCPU.IsZero() {
+			resources.Requests[v1.ResourceCPU] = maxCPU
+		}
 
 	memoryRequest := resources.Requests[v1.ResourceMemory]
 	maxMemoryRequest := c.OpConfig.MaxMemoryRequest
@@ -223,7 +226,9 @@ func (c *Cluster) enforceMaxResourceRequests(resources *v1.ResourceRequirements)
 			return fmt.Errorf("could not compare defined memory request %s for %q container with configured maximum value %s: %v",
 				memoryRequest.String(), constants.PostgresContainerName, maxMemoryRequest, err)
 		}
-		resources.Requests[v1.ResourceMemory] = maxMemory
+		if !maxMemory.IsZero() {
+			resources.Requests[v1.ResourceMemory] = maxMemory
+		}
 
 	return nil
 }
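For context on the IsZero() guard introduced above: resource.Quantity values parse from strings, compare with Cmp, and report an unset/zero value via IsZero. A hedged, standalone sketch of that capping logic; the function and parameter names are illustrative, not the operator's API:

    package resourcedemo

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // capRequest lowers request to maxStr only when a non-zero maximum is configured.
    func capRequest(request resource.Quantity, maxStr string) (resource.Quantity, error) {
        if maxStr == "" {
            return request, nil
        }
        max, err := resource.ParseQuantity(maxStr)
        if err != nil {
            return request, fmt.Errorf("could not parse maximum value %s: %v", maxStr, err)
        }
        // only enforce the cap when the maximum is set and the request exceeds it
        if !max.IsZero() && request.Cmp(max) > 0 {
            return max, nil
        }
        return request, nil
    }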
@@ -240,30 +245,66 @@ func setMemoryRequestToLimit(resources *v1.ResourceRequirements, containerName s
 	}
 }
 
+func matchLimitsWithRequestsIfSmaller(resources *v1.ResourceRequirements, containerName string, logger *logrus.Entry) {
+	requests := resources.Requests
+	limits := resources.Limits
+	requestCPU, cpuRequestsExists := requests[v1.ResourceCPU]
+	limitCPU, cpuLimitExists := limits[v1.ResourceCPU]
+	if cpuRequestsExists && cpuLimitExists && limitCPU.Cmp(requestCPU) == -1 {
+		logger.Warningf("CPU limit of %s for %q container is increased to match CPU requests of %s", limitCPU.String(), containerName, requestCPU.String())
+		resources.Limits[v1.ResourceCPU] = requestCPU
+	}
+
+	requestMemory, memoryRequestsExists := requests[v1.ResourceMemory]
+	limitMemory, memoryLimitExists := limits[v1.ResourceMemory]
+	if memoryRequestsExists && memoryLimitExists && limitMemory.Cmp(requestMemory) == -1 {
+		logger.Warningf("memory limit of %s for %q container is increased to match memory requests of %s", limitMemory.String(), containerName, requestMemory.String())
+		resources.Limits[v1.ResourceMemory] = requestMemory
+	}
+}
+
 func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
 	var err error
 	requests := v1.ResourceList{}
+	emptyResourceExamples := []string{"", "0", "null"}
 
-	if spec.CPU != "" {
-		requests[v1.ResourceCPU], err = resource.ParseQuantity(spec.CPU)
+	if spec.CPU != nil && !slices.Contains(emptyResourceExamples, *spec.CPU) {
+		requests[v1.ResourceCPU], err = resource.ParseQuantity(*spec.CPU)
 		if err != nil {
 			return nil, fmt.Errorf("could not parse CPU quantity: %v", err)
 		}
 	} else {
-		requests[v1.ResourceCPU], err = resource.ParseQuantity(defaults.CPU)
-		if err != nil {
-			return nil, fmt.Errorf("could not parse default CPU quantity: %v", err)
+		if defaults.CPU != nil && !slices.Contains(emptyResourceExamples, *defaults.CPU) {
+			requests[v1.ResourceCPU], err = resource.ParseQuantity(*defaults.CPU)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse default CPU quantity: %v", err)
+			}
 		}
 	}
-	if spec.Memory != "" {
-		requests[v1.ResourceMemory], err = resource.ParseQuantity(spec.Memory)
+	if spec.Memory != nil && !slices.Contains(emptyResourceExamples, *spec.Memory) {
+		requests[v1.ResourceMemory], err = resource.ParseQuantity(*spec.Memory)
 		if err != nil {
 			return nil, fmt.Errorf("could not parse memory quantity: %v", err)
 		}
 	} else {
-		requests[v1.ResourceMemory], err = resource.ParseQuantity(defaults.Memory)
-		if err != nil {
-			return nil, fmt.Errorf("could not parse default memory quantity: %v", err)
+		if defaults.Memory != nil && !slices.Contains(emptyResourceExamples, *defaults.Memory) {
+			requests[v1.ResourceMemory], err = resource.ParseQuantity(*defaults.Memory)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse default memory quantity: %v", err)
+			}
+		}
+	}
+
+	if spec.HugePages2Mi != nil {
+		requests[v1.ResourceHugePagesPrefix+"2Mi"], err = resource.ParseQuantity(*spec.HugePages2Mi)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse hugepages-2Mi quantity: %v", err)
+		}
+	}
+	if spec.HugePages1Gi != nil {
+		requests[v1.ResourceHugePagesPrefix+"1Gi"], err = resource.ParseQuantity(*spec.HugePages1Gi)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse hugepages-1Gi quantity: %v", err)
 		}
 	}
 
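The fillResourceList change above switches the resource fields from plain strings to string pointers and treats "", "0" and "null" as unset. A compact sketch of that decision logic under those assumptions; the helper and variable names here are illustrative, not the operator's API:

    package resourcedemo

    import (
        "golang.org/x/exp/slices"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    var emptyResourceExamples = []string{"", "0", "null"}

    // pickQuantity prefers the spec value, falls back to the default, and returns
    // ok=false when neither carries a usable quantity.
    func pickQuantity(specVal, defaultVal *string) (resource.Quantity, bool, error) {
        for _, candidate := range []*string{specVal, defaultVal} {
            if candidate == nil || slices.Contains(emptyResourceExamples, *candidate) {
                continue // nil or placeholder values count as "not set"
            }
            q, err := resource.ParseQuantity(*candidate)
            if err != nil {
                return resource.Quantity{}, false, err
            }
            return q, true, nil
        }
        return resource.Quantity{}, false, nil
    }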
@@ -301,6 +342,10 @@ func (c *Cluster) generateResourceRequirements(
 		}
 	}
 
+	// make sure after reflecting default and enforcing min limit values we don't have requests > limits
+	matchLimitsWithRequestsIfSmaller(&result, containerName, c.logger)
+
+	// vice versa set memory requests to limit if option is enabled
 	if c.OpConfig.SetMemoryRequestToLimit {
 		setMemoryRequestToLimit(&result, containerName, c.logger)
 	}
@@ -430,13 +475,6 @@ PatroniInitDBParams:
 		config.PgLocalConfiguration[patroniPGHBAConfParameterName] = patroni.PgHba
 	}
 
-	config.Bootstrap.Users = map[string]pgUser{
-		opConfig.PamRoleName: {
-			Password: "",
-			Options:  []string{constants.RoleFlagCreateDB, constants.RoleFlagNoLogin},
-		},
-	}
-
 	res, err := json.Marshal(config)
 	return string(res), err
 }
@@ -1145,6 +1183,37 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
 	return secretPodEnvVarsList, nil
 }
 
+// Return list of variables the cronjob received from the configured Secret
+func (c *Cluster) getCronjobEnvironmentSecretVariables() ([]v1.EnvVar, error) {
+	secretCronjobEnvVarsList := make([]v1.EnvVar, 0)
+
+	if c.OpConfig.LogicalBackupCronjobEnvironmentSecret == "" {
+		return secretCronjobEnvVarsList, nil
+	}
+
+	secret, err := c.KubeClient.Secrets(c.Namespace).Get(
+		context.TODO(),
+		c.OpConfig.LogicalBackupCronjobEnvironmentSecret,
+		metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("could not read Secret CronjobEnvironmentSecretName: %v", err)
+	}
+
+	for k := range secret.Data {
+		secretCronjobEnvVarsList = append(secretCronjobEnvVarsList,
+			v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{
+				SecretKeyRef: &v1.SecretKeySelector{
+					LocalObjectReference: v1.LocalObjectReference{
+						Name: c.OpConfig.LogicalBackupCronjobEnvironmentSecret,
+					},
+					Key: k,
+				},
+			}})
+	}
+
+	return secretCronjobEnvVarsList, nil
+}
+
 func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container {
 	name := sidecar.Name
 	if name == "" {
@@ -1171,12 +1240,12 @@ func getBucketScopeSuffix(uid string) string {
 func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources {
 	return acidv1.Resources{
 		ResourceRequests: acidv1.ResourceDescription{
-			CPU:    cpuRequest,
-			Memory: memoryRequest,
+			CPU:    &cpuRequest,
+			Memory: &memoryRequest,
 		},
 		ResourceLimits: acidv1.ResourceDescription{
-			CPU:    cpuLimit,
-			Memory: memoryLimit,
+			CPU:    &cpuLimit,
+			Memory: &memoryLimit,
 		},
 	}
 }
@@ -1440,6 +1509,19 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
 	}
 
+	var persistentVolumeClaimRetentionPolicy appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy
+	if c.OpConfig.PersistentVolumeClaimRetentionPolicy["when_deleted"] == "delete" {
+		persistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
+	} else {
+		persistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
+	}
+
+	if c.OpConfig.PersistentVolumeClaimRetentionPolicy["when_scaled"] == "delete" {
+		persistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
+	} else {
+		persistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
+	}
+
 	statefulSet := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: c.statefulSetName(),
@@ -1448,13 +1530,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
 		},
 		Spec: appsv1.StatefulSetSpec{
 			Replicas:             &numberOfInstances,
 			Selector:             c.labelsSelector(),
 			ServiceName:          c.serviceName(Master),
 			Template:             *podTemplate,
 			VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
 			UpdateStrategy:       updateStrategy,
 			PodManagementPolicy:  podManagementPolicy,
+			PersistentVolumeClaimRetentionPolicy: &persistentVolumeClaimRetentionPolicy,
 		},
 	}
 
@@ -2113,12 +2196,19 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
 func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
 	minAvailable := intstr.FromInt(1)
 	pdbEnabled := c.OpConfig.EnablePodDisruptionBudget
+	pdbMasterLabelSelector := c.OpConfig.PDBMasterLabelSelector
 
 	// if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0.
 	if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 {
 		minAvailable = intstr.FromInt(0)
 	}
 
+	// define label selector and add the master role selector if enabled
+	labels := c.labelsSet(false)
+	if pdbMasterLabelSelector == nil || *c.OpConfig.PDBMasterLabelSelector {
+		labels[c.OpConfig.PodRoleLabel] = string(Master)
+	}
+
 	return &policyv1.PodDisruptionBudget{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: c.podDisruptionBudgetName(),
@@ -2129,7 +2219,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
 		Spec: policyv1.PodDisruptionBudgetSpec{
 			MinAvailable: &minAvailable,
 			Selector: &metav1.LabelSelector{
-				MatchLabels: c.roleLabelsSet(false, Master),
+				MatchLabels: labels,
 			},
 		},
 	}
@@ -2166,7 +2256,13 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
 		return nil, fmt.Errorf("could not generate resource requirements for logical backup pods: %v", err)
 	}
 
+	secretEnvVarsList, err := c.getCronjobEnvironmentSecretVariables()
+	if err != nil {
+		return nil, err
+	}
+
 	envVars := c.generateLogicalBackupPodEnvVars()
+	envVars = append(envVars, secretEnvVarsList...)
 	logicalBackupContainer := generateContainer(
 		logicalBackupContainerName,
 		&c.OpConfig.LogicalBackup.LogicalBackupDockerImage,
@@ -2178,11 +2274,12 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
 		nil,
 	)
 
-	labels := map[string]string{
-		c.OpConfig.ClusterNameLabel: c.Name,
-		"application":               "spilo-logical-backup",
+	logicalBackupJobLabel := map[string]string{
+		"application": "spilo-logical-backup",
 	}
 
+	labels := labels.Merge(c.labelsSet(true), logicalBackupJobLabel)
+
 	nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil)
 	podAffinity := podAffinity(
 		labels,
@@ -2198,7 +2295,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
 	if podTemplate, err = c.generatePodTemplate(
 		c.Namespace,
 		labels,
-		annotations,
+		c.annotationsSet(annotations),
 		logicalBackupContainer,
 		[]v1.Container{},
 		[]v1.Container{},
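One of the generateStatefulSet hunks above derives the StatefulSet's PersistentVolumeClaimRetentionPolicy from two config keys. A standalone sketch of that mapping, assuming the same "when_deleted"/"when_scaled" keys with a "delete" vs. anything-else semantic; the function name and config map are illustrative, not the operator's API:

    package k8sdemo

    import appsv1 "k8s.io/api/apps/v1"

    // pvcRetentionPolicy maps operator-style config strings onto the StatefulSet API type,
    // defaulting both fields to Retain.
    func pvcRetentionPolicy(cfg map[string]string) appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy {
        policy := appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
            WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
            WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
        }
        if cfg["when_deleted"] == "delete" {
            policy.WhenDeleted = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
        }
        if cfg["when_scaled"] == "delete" {
            policy.WhenScaled = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
        }
        return policy
    }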
@ -5,9 +5,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"time"
|
|
||||||
|
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
|
@ -73,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
subtest: "Patroni default configuration",
|
subtest: "Patroni default configuration",
|
||||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "15"},
|
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
|
||||||
patroni: &acidv1.Patroni{},
|
patroni: &acidv1.Patroni{},
|
||||||
opConfig: &config.Config{
|
opConfig: &config.Config{
|
||||||
Auth: config.Auth{
|
Auth: config.Auth{
|
||||||
PamRoleName: "zalandos",
|
PamRoleName: "zalandos",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{}}}`,
|
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
subtest: "Patroni configured",
|
subtest: "Patroni configured",
|
||||||
pgParam: &acidv1.PostgresqlParam{PgVersion: "15"},
|
pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
|
||||||
patroni: &acidv1.Patroni{
|
patroni: &acidv1.Patroni{
|
||||||
InitDB: map[string]string{
|
InitDB: map[string]string{
|
||||||
"encoding": "UTF8",
|
"encoding": "UTF8",
|
||||||
|
|
@ -102,52 +101,39 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
|
||||||
Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
|
Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
|
||||||
FailsafeMode: util.True(),
|
FailsafeMode: util.True(),
|
||||||
},
|
},
|
||||||
opConfig: &config.Config{
|
opConfig: &config.Config{},
|
||||||
Auth: config.Auth{
|
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
|
||||||
-                    PamRoleName: "zalandos",
-                },
-            },
-            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
         },
         {
             subtest: "Patroni failsafe_mode configured globally",
-            pgParam: &acidv1.PostgresqlParam{PgVersion: "15"},
+            pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
             patroni: &acidv1.Patroni{},
             opConfig: &config.Config{
-                Auth: config.Auth{
-                    PamRoleName: "zalandos",
-                },
                 EnablePatroniFailsafeMode: util.True(),
             },
-            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":true}}}`,
+            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
         },
         {
             subtest: "Patroni failsafe_mode configured globally, disabled for cluster",
-            pgParam: &acidv1.PostgresqlParam{PgVersion: "15"},
+            pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
             patroni: &acidv1.Patroni{
                 FailsafeMode: util.False(),
             },
             opConfig: &config.Config{
-                Auth: config.Auth{
-                    PamRoleName: "zalandos",
-                },
                 EnablePatroniFailsafeMode: util.True(),
             },
-            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":false}}}`,
+            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
         },
         {
             subtest: "Patroni failsafe_mode disabled globally, configured for cluster",
-            pgParam: &acidv1.PostgresqlParam{PgVersion: "15"},
+            pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
             patroni: &acidv1.Patroni{
                 FailsafeMode: util.True(),
             },
             opConfig: &config.Config{
-                Auth: config.Auth{
-                    PamRoleName: "zalandos",
-                },
                 EnablePatroniFailsafeMode: util.False(),
             },
-            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":true}}}`,
+            result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
         },
     }
     for _, tt := range tests {

@@ -157,8 +143,8 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
             t.Errorf("Unexpected error: %v", err)
         }
         if tt.result != result {
-            t.Errorf("%s %s: Spilo Config is %v, expected %v for role %#v and param %#v",
-                t.Name(), tt.subtest, result, tt.result, tt.opConfig.Auth.PamRoleName, tt.pgParam)
+            t.Errorf("%s %s: Spilo Config is %v, expected %v and param %#v",
+                t.Name(), tt.subtest, result, tt.result, tt.pgParam)
         }
     }
 }

@@ -178,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
         },
         {
             subTest: "test current bin path against hard coded template",
-            binPath: "/usr/lib/postgresql/15/bin",
+            binPath: "/usr/lib/postgresql/16/bin",
             template: pgBinariesLocationTemplate,
-            expected: "15",
+            expected: "16",
         },
         {
             subTest: "test alternative bin path against a matching template",
-            binPath: "/usr/pgsql-15/bin",
+            binPath: "/usr/pgsql-16/bin",
             template: "/usr/pgsql-%v/bin",
-            expected: "15",
+            expected: "16",
         },
     }

@@ -205,6 +191,7 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
 const (
     testPodEnvironmentConfigMapName = "pod_env_cm"
     testPodEnvironmentSecretName = "pod_env_sc"
+    testCronjobEnvironmentSecretName = "pod_env_sc"
     testPodEnvironmentObjectNotExists = "idonotexist"
     testPodEnvironmentSecretNameAPIError = "pod_env_sc_apierror"
     testResourceCheckInterval = 3

@@ -461,6 +448,96 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {

 }

+// Test if the keys of an existing secret are properly referenced
+func TestCronjobEnvironmentSecretVariables(t *testing.T) {
+    testName := "TestCronjobEnvironmentSecretVariables"
+    tests := []struct {
+        subTest  string
+        opConfig config.Config
+        envVars  []v1.EnvVar
+        err      error
+    }{
+        {
+            subTest: "No CronjobEnvironmentSecret configured",
+            envVars: []v1.EnvVar{},
+        },
+        {
+            subTest: "Secret referenced by CronjobEnvironmentSecret does not exist",
+            opConfig: config.Config{
+                LogicalBackup: config.LogicalBackup{
+                    LogicalBackupCronjobEnvironmentSecret: "idonotexist",
+                },
+            },
+            err: fmt.Errorf("could not read Secret CronjobEnvironmentSecretName: secret.core \"idonotexist\" not found"),
+        },
+        {
+            subTest: "Cronjob environment vars reference all keys from secret configured by CronjobEnvironmentSecret",
+            opConfig: config.Config{
+                LogicalBackup: config.LogicalBackup{
+                    LogicalBackupCronjobEnvironmentSecret: testCronjobEnvironmentSecretName,
+                },
+            },
+            envVars: []v1.EnvVar{
+                {
+                    Name: "clone_aws_access_key_id",
+                    ValueFrom: &v1.EnvVarSource{
+                        SecretKeyRef: &v1.SecretKeySelector{
+                            LocalObjectReference: v1.LocalObjectReference{
+                                Name: testPodEnvironmentSecretName,
+                            },
+                            Key: "clone_aws_access_key_id",
+                        },
+                    },
+                },
+                {
+                    Name: "custom_variable",
+                    ValueFrom: &v1.EnvVarSource{
+                        SecretKeyRef: &v1.SecretKeySelector{
+                            LocalObjectReference: v1.LocalObjectReference{
+                                Name: testPodEnvironmentSecretName,
+                            },
+                            Key: "custom_variable",
+                        },
+                    },
+                },
+                {
+                    Name: "standby_google_application_credentials",
+                    ValueFrom: &v1.EnvVarSource{
+                        SecretKeyRef: &v1.SecretKeySelector{
+                            LocalObjectReference: v1.LocalObjectReference{
+                                Name: testPodEnvironmentSecretName,
+                            },
+                            Key: "standby_google_application_credentials",
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        c := newMockCluster(tt.opConfig)
+        vars, err := c.getCronjobEnvironmentSecretVariables()
+        sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
+        if !reflect.DeepEqual(vars, tt.envVars) {
+            t.Errorf("%s %s: expected `%v` but got `%v`",
+                testName, tt.subTest, tt.envVars, vars)
+        }
+        if tt.err != nil {
+            if err.Error() != tt.err.Error() {
+                t.Errorf("%s %s: expected error `%v` but got `%v`",
+                    testName, tt.subTest, tt.err, err)
+            }
+        } else {
+            if err != nil {
+                t.Errorf("%s %s: expected no error but got error: `%v`",
+                    testName, tt.subTest, err)
+            }
+        }
+    }
+
+}

 func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
     required := map[string]bool{
         "PGHOST": false,
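The new test above pins down the expected behavior of getCronjobEnvironmentSecretVariables: every key of the referenced Secret should come back as an env var that points at that key through a SecretKeyRef. A minimal, self-contained sketch of that mapping, assuming a plain client-go Secret object; the function name envVarsFromSecret and the error-free flow are illustrative choices, not the operator's actual code:

package main

import (
    "fmt"
    "sort"

    v1 "k8s.io/api/core/v1"
)

// envVarsFromSecret builds one EnvVar per key of the given Secret,
// each referencing its own key through a SecretKeyRef.
func envVarsFromSecret(secretName string, secret *v1.Secret) []v1.EnvVar {
    vars := make([]v1.EnvVar, 0, len(secret.Data))
    for key := range secret.Data {
        vars = append(vars, v1.EnvVar{
            Name: key,
            ValueFrom: &v1.EnvVarSource{
                SecretKeyRef: &v1.SecretKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{Name: secretName},
                    Key:                  key,
                },
            },
        })
    }
    // map iteration order is random, so sort by name for deterministic comparisons,
    // just as the test does with sort.Slice
    sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
    return vars
}

func main() {
    secret := &v1.Secret{Data: map[string][]byte{"custom_variable": []byte("x")}}
    fmt.Println(envVarsFromSecret("pod_env_sc", secret))
}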
|
|
@ -1344,8 +1421,8 @@ func TestNodeAffinity(t *testing.T) {
|
||||||
return acidv1.PostgresSpec{
|
return acidv1.PostgresSpec{
|
||||||
TeamID: "myapp", NumberOfInstances: 1,
|
TeamID: "myapp", NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -1437,8 +1514,8 @@ func TestPodAffinity(t *testing.T) {
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
NumberOfInstances: 1,
|
NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -1581,8 +1658,8 @@ func TestTLS(t *testing.T) {
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
TeamID: "myapp", NumberOfInstances: 1,
|
TeamID: "myapp", NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -1822,8 +1899,8 @@ func TestAdditionalVolume(t *testing.T) {
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
TeamID: "myapp", NumberOfInstances: 1,
|
TeamID: "myapp", NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -1898,8 +1975,8 @@ func TestVolumeSelector(t *testing.T) {
|
||||||
TeamID: "myapp",
|
TeamID: "myapp",
|
||||||
NumberOfInstances: 0,
|
NumberOfInstances: 0,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: volume,
|
Volume: volume,
|
||||||
}
|
}
|
||||||
|
|
@ -2023,15 +2100,15 @@ func TestSidecars(t *testing.T) {
|
||||||
|
|
||||||
spec = acidv1.PostgresSpec{
|
spec = acidv1.PostgresSpec{
|
||||||
PostgresqlParam: acidv1.PostgresqlParam{
|
PostgresqlParam: acidv1.PostgresqlParam{
|
||||||
PgVersion: "15",
|
PgVersion: "16",
|
||||||
Parameters: map[string]string{
|
Parameters: map[string]string{
|
||||||
"max_connections": "100",
|
"max_connections": "100",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
TeamID: "myapp", NumberOfInstances: 1,
|
TeamID: "myapp", NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -2043,8 +2120,8 @@ func TestSidecars(t *testing.T) {
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
Name: "cluster-specific-sidecar-with-resources",
|
Name: "cluster-specific-sidecar-with-resources",
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
|
|
@ -2301,6 +2378,30 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
// With PDBMasterLabelSelector disabled.
|
||||||
|
{
|
||||||
|
New(
|
||||||
|
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", PDBMasterLabelSelector: util.False()}},
|
||||||
|
k8sutil.KubernetesClient{},
|
||||||
|
acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
|
||||||
|
Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
|
||||||
|
logger,
|
||||||
|
eventRecorder),
|
||||||
|
policyv1.PodDisruptionBudget{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "postgres-myapp-database-pdb",
|
||||||
|
Namespace: "myapp",
|
||||||
|
Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"},
|
||||||
|
},
|
||||||
|
Spec: policyv1.PodDisruptionBudgetSpec{
|
||||||
|
MinAvailable: util.ToIntStr(1),
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{"cluster-name": "myapp-database"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
|
|
@ -2318,8 +2419,8 @@ func TestGenerateService(t *testing.T) {
|
||||||
spec = acidv1.PostgresSpec{
|
spec = acidv1.PostgresSpec{
|
||||||
TeamID: "myapp", NumberOfInstances: 1,
|
TeamID: "myapp", NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
Size: "1G",
|
Size: "1G",
|
||||||
|
|
@ -2331,8 +2432,8 @@ func TestGenerateService(t *testing.T) {
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
Name: "cluster-specific-sidecar-with-resources",
|
Name: "cluster-specific-sidecar-with-resources",
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
|
|
@ -2545,8 +2646,8 @@ func TestEnableLoadBalancers(t *testing.T) {
|
||||||
EnableReplicaPoolerLoadBalancer: util.False(),
|
EnableReplicaPoolerLoadBalancer: util.False(),
|
||||||
NumberOfInstances: 1,
|
NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2592,8 +2693,8 @@ func TestEnableLoadBalancers(t *testing.T) {
|
||||||
EnableReplicaPoolerLoadBalancer: util.True(),
|
EnableReplicaPoolerLoadBalancer: util.True(),
|
||||||
NumberOfInstances: 1,
|
NumberOfInstances: 1,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2640,7 +2741,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
clusterNameLabel := "cluster-name"
|
clusterNameLabel := "cluster-name"
|
||||||
sidecarName := "postgres-exporter"
|
sidecarName := "postgres-exporter"
|
||||||
|
|
||||||
// enforceMinResourceLimits will be called 2 twice emitting 4 events (2x cpu, 2x memory raise)
|
// enforceMinResourceLimits will be called 2 times emitting 4 events (2x cpu, 2x memory raise)
|
||||||
// enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap)
|
// enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap)
|
||||||
// hence event bufferSize of 10 is required
|
// hence event bufferSize of 10 is required
|
||||||
newEventRecorder := record.NewFakeRecorder(10)
|
newEventRecorder := record.NewFakeRecorder(10)
|
||||||
|
|
@ -2685,8 +2786,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2714,8 +2815,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2732,7 +2833,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("50Mi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2741,8 +2842,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("50Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2759,8 +2860,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{Memory: "1Gi"},
|
ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("1Gi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2769,8 +2870,97 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "1Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("1Gi")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test generation of resources when default is not defined",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: config.Resources{
|
||||||
|
ClusterLabels: map[string]string{"application": "spilo"},
|
||||||
|
ClusterNameLabel: clusterNameLabel,
|
||||||
|
DefaultCPURequest: "100m",
|
||||||
|
DefaultMemoryRequest: "100Mi",
|
||||||
|
PodRoleLabel: "spilo-role",
|
||||||
|
},
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
SetMemoryRequestToLimit: false,
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test matchLimitsWithRequestsIfSmaller",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: configResources,
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
SetMemoryRequestToLimit: false,
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Resources: &acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("750Mi")},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("300Mi")},
|
||||||
|
},
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("750Mi")},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("750Mi")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "defaults are not defined but minimum limit is",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: config.Resources{
|
||||||
|
ClusterLabels: map[string]string{"application": "spilo"},
|
||||||
|
ClusterNameLabel: clusterNameLabel,
|
||||||
|
MinMemoryLimit: "250Mi",
|
||||||
|
PodRoleLabel: "spilo-role",
|
||||||
|
},
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
SetMemoryRequestToLimit: false,
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Resources: &acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
|
},
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2787,8 +2977,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{Memory: "200Mi"},
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("200Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{Memory: "300Mi"},
|
ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("300Mi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2797,8 +2987,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "300Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("300Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "300Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("300Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2818,8 +3008,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
Name: sidecarName,
|
Name: sidecarName,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
@ -2830,8 +3020,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2848,8 +3038,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("250Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("400m"), Memory: k8sutil.StringToPointer("800Mi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2858,8 +3048,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("250Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("400m"), Memory: k8sutil.StringToPointer("800Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2876,8 +3066,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "200m", Memory: "200Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2886,8 +3076,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "250Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("250Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2907,8 +3097,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
acidv1.Sidecar{
|
acidv1.Sidecar{
|
||||||
Name: sidecarName,
|
Name: sidecarName,
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
@ -2919,8 +3109,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2937,8 +3127,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "2Gi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "2", Memory: "4Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2947,8 +3137,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "500m", Memory: "1Gi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("500m"), Memory: k8sutil.StringToPointer("1Gi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "2", Memory: "4Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -2965,8 +3155,8 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
Spec: acidv1.PostgresSpec{
|
Spec: acidv1.PostgresSpec{
|
||||||
Resources: &acidv1.Resources{
|
Resources: &acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{Memory: "500Mi"},
|
ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{Memory: "2Gi"},
|
ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("2Gi")},
|
||||||
},
|
},
|
||||||
TeamID: "acid",
|
TeamID: "acid",
|
||||||
Volume: acidv1.Volume{
|
Volume: acidv1.Volume{
|
||||||
|
|
@ -2975,8 +3165,133 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "1Gi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("1Gi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "2Gi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test HugePages are not set on container when not requested in manifest",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: configResources,
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Resources: &acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{},
|
||||||
|
},
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("100m"),
|
||||||
|
Memory: k8sutil.StringToPointer("100Mi"),
|
||||||
|
},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("1"),
|
||||||
|
Memory: k8sutil.StringToPointer("500Mi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test HugePages are passed through to the postgres container",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: configResources,
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Resources: &acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("128Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("1Gi"),
|
||||||
|
},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("256Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("2Gi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("100m"),
|
||||||
|
Memory: k8sutil.StringToPointer("100Mi"),
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("128Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("1Gi"),
|
||||||
|
},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("1"),
|
||||||
|
Memory: k8sutil.StringToPointer("500Mi"),
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("256Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("2Gi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test HugePages are passed through on sidecars",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: configResources,
|
||||||
|
PodManagementPolicy: "ordered_ready",
|
||||||
|
},
|
||||||
|
pgSpec: acidv1.Postgresql{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: clusterName,
|
||||||
|
Namespace: namespace,
|
||||||
|
},
|
||||||
|
Spec: acidv1.PostgresSpec{
|
||||||
|
Sidecars: []acidv1.Sidecar{
|
||||||
|
{
|
||||||
|
Name: "test-sidecar",
|
||||||
|
DockerImage: "test-image",
|
||||||
|
Resources: &acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("128Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("1Gi"),
|
||||||
|
},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("256Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("2Gi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
TeamID: "acid",
|
||||||
|
Volume: acidv1.Volume{
|
||||||
|
Size: "1G",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("100m"),
|
||||||
|
Memory: k8sutil.StringToPointer("100Mi"),
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("128Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("1Gi"),
|
||||||
|
},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{
|
||||||
|
CPU: k8sutil.StringToPointer("1"),
|
||||||
|
Memory: k8sutil.StringToPointer("500Mi"),
|
||||||
|
HugePages2Mi: k8sutil.StringToPointer("256Mi"),
|
||||||
|
HugePages1Gi: k8sutil.StringToPointer("2Gi"),
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
@ -3009,7 +3324,9 @@ func TestGenerateResourceRequirements(t *testing.T) {
|
||||||
|
|
||||||
func TestGenerateLogicalBackupJob(t *testing.T) {
|
func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
clusterName := "acid-test-cluster"
|
clusterName := "acid-test-cluster"
|
||||||
|
teamId := "test"
|
||||||
configResources := config.Resources{
|
configResources := config.Resources{
|
||||||
|
ClusterNameLabel: "cluster-name",
|
||||||
DefaultCPURequest: "100m",
|
DefaultCPURequest: "100m",
|
||||||
DefaultCPULimit: "1",
|
DefaultCPULimit: "1",
|
||||||
DefaultMemoryRequest: "100Mi",
|
DefaultMemoryRequest: "100Mi",
|
||||||
|
|
@ -3017,12 +3334,14 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
subTest string
|
subTest string
|
||||||
config config.Config
|
config config.Config
|
||||||
specSchedule string
|
specSchedule string
|
||||||
expectedSchedule string
|
expectedSchedule string
|
||||||
expectedJobName string
|
expectedJobName string
|
||||||
expectedResources acidv1.Resources
|
expectedResources acidv1.Resources
|
||||||
|
expectedAnnotation map[string]string
|
||||||
|
expectedLabel map[string]string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
subTest: "test generation of logical backup pod resources when not configured",
|
subTest: "test generation of logical backup pod resources when not configured",
|
||||||
|
|
@ -3039,9 +3358,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
expectedSchedule: "30 00 * * *",
|
expectedSchedule: "30 00 * * *",
|
||||||
expectedJobName: "logical-backup-acid-test-cluster",
|
expectedJobName: "logical-backup-acid-test-cluster",
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
|
expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
subTest: "test generation of logical backup pod resources when configured",
|
subTest: "test generation of logical backup pod resources when configured",
|
||||||
|
|
@ -3062,9 +3383,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
expectedSchedule: "30 00 * * 7",
|
expectedSchedule: "30 00 * * 7",
|
||||||
expectedJobName: "lb-acid-test-cluster",
|
expectedJobName: "lb-acid-test-cluster",
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "50Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("50Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "300m", Memory: "300Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("300m"), Memory: k8sutil.StringToPointer("300Mi")},
|
||||||
},
|
},
|
||||||
|
expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
subTest: "test generation of logical backup pod resources when partly configured",
|
subTest: "test generation of logical backup pod resources when partly configured",
|
||||||
|
|
@ -3083,9 +3406,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
expectedSchedule: "30 00 * * *",
|
expectedSchedule: "30 00 * * *",
|
||||||
expectedJobName: "acid-test-cluster",
|
expectedJobName: "acid-test-cluster",
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "100Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "500Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
},
|
},
|
||||||
|
expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: nil,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
subTest: "test generation of logical backup pod resources with SetMemoryRequestToLimit enabled",
|
subTest: "test generation of logical backup pod resources with SetMemoryRequestToLimit enabled",
|
||||||
|
|
@ -3104,9 +3429,55 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
expectedSchedule: "30 00 * * *",
|
expectedSchedule: "30 00 * * *",
|
||||||
expectedJobName: "test-long-prefix-so-name-must-be-trimmed-acid-test-c",
|
expectedJobName: "test-long-prefix-so-name-must-be-trimmed-acid-test-c",
|
||||||
expectedResources: acidv1.Resources{
|
expectedResources: acidv1.Resources{
|
||||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "200Mi"},
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("200Mi")},
|
||||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "200Mi"},
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("200Mi")},
|
||||||
},
|
},
|
||||||
|
expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test generation of pod annotations when cluster InheritedLabel is set",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: config.Resources{
|
||||||
|
ClusterNameLabel: "cluster-name",
|
||||||
|
InheritedLabels: []string{"labelKey"},
|
||||||
|
DefaultCPURequest: "100m",
|
||||||
|
DefaultCPULimit: "1",
|
||||||
|
DefaultMemoryRequest: "100Mi",
|
||||||
|
DefaultMemoryLimit: "500Mi",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
specSchedule: "",
|
||||||
|
expectedJobName: "acid-test-cluster",
|
||||||
|
expectedSchedule: "",
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
|
},
|
||||||
|
expectedLabel: map[string]string{"labelKey": "labelValue", "cluster-name": clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
subTest: "test generation of pod annotations when cluster InheritedAnnotations is set",
|
||||||
|
config: config.Config{
|
||||||
|
Resources: config.Resources{
|
||||||
|
ClusterNameLabel: "cluster-name",
|
||||||
|
InheritedAnnotations: []string{"annotationKey"},
|
||||||
|
DefaultCPURequest: "100m",
|
||||||
|
DefaultCPULimit: "1",
|
||||||
|
DefaultMemoryRequest: "100Mi",
|
||||||
|
DefaultMemoryLimit: "500Mi",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
specSchedule: "",
|
||||||
|
expectedJobName: "acid-test-cluster",
|
||||||
|
expectedSchedule: "",
|
||||||
|
expectedResources: acidv1.Resources{
|
||||||
|
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
|
||||||
|
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")},
|
||||||
|
},
|
||||||
|
expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId},
|
||||||
|
expectedAnnotation: map[string]string{"annotationKey": "annotationValue"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -3115,12 +3486,19 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
Config{
|
Config{
|
||||||
OpConfig: tt.config,
|
OpConfig: tt.config,
|
||||||
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)
|
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)
|
||||||
|
|
||||||
cluster.ObjectMeta.Name = clusterName
|
cluster.ObjectMeta.Name = clusterName
|
||||||
|
cluster.Spec.TeamID = teamId
|
||||||
|
if cluster.ObjectMeta.Labels == nil {
|
||||||
|
cluster.ObjectMeta.Labels = make(map[string]string)
|
||||||
|
}
|
||||||
|
if cluster.ObjectMeta.Annotations == nil {
|
||||||
|
cluster.ObjectMeta.Annotations = make(map[string]string)
|
||||||
|
}
|
||||||
|
cluster.ObjectMeta.Labels["labelKey"] = "labelValue"
|
||||||
|
cluster.ObjectMeta.Annotations["annotationKey"] = "annotationValue"
|
||||||
cluster.Spec.LogicalBackupSchedule = tt.specSchedule
|
cluster.Spec.LogicalBackupSchedule = tt.specSchedule
|
||||||
cronJob, err := cluster.generateLogicalBackupJob()
|
cronJob, err := cluster.generateLogicalBackupJob()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
if cronJob.Spec.Schedule != tt.expectedSchedule {
|
if cronJob.Spec.Schedule != tt.expectedSchedule {
|
||||||
t.Errorf("%s - %s: expected schedule %s, got %s", t.Name(), tt.subTest, tt.expectedSchedule, cronJob.Spec.Schedule)
|
t.Errorf("%s - %s: expected schedule %s, got %s", t.Name(), tt.subTest, tt.expectedSchedule, cronJob.Spec.Schedule)
|
||||||
}
|
}
|
||||||
|
|
@ -3129,6 +3507,14 @@ func TestGenerateLogicalBackupJob(t *testing.T) {
|
||||||
t.Errorf("%s - %s: expected job name %s, got %s", t.Name(), tt.subTest, tt.expectedJobName, cronJob.Name)
|
t.Errorf("%s - %s: expected job name %s, got %s", t.Name(), tt.subTest, tt.expectedJobName, cronJob.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(cronJob.Labels, tt.expectedLabel) {
|
||||||
|
t.Errorf("%s - %s: expected labels %s, got %s", t.Name(), tt.subTest, tt.expectedLabel, cronJob.Labels)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(cronJob.Annotations, tt.expectedAnnotation) {
|
||||||
|
t.Errorf("%s - %s: expected annotations %s, got %s", t.Name(), tt.subTest, tt.expectedAnnotation, cronJob.Annotations)
|
||||||
|
}
|
||||||
|
|
||||||
containers := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers
|
containers := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers
|
||||||
clusterResources, err := parseResourceRequirements(containers[0].Resources)
|
clusterResources, err := parseResourceRequirements(containers[0].Resources)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
|
||||||
|
|
@@ -11,13 +11,12 @@ import (

 // VersionMap Map of version numbers
 var VersionMap = map[string]int{
-    "10": 100000,
     "11": 110000,
     "12": 120000,
     "13": 130000,
     "14": 140000,
     "15": 150000,
+    "16": 160000,
 }

 // IsBiggerPostgresVersion Compare two Postgres version numbers

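Dropping "10" and adding "16" keeps VersionMap aligned with the majors the operator still accepts; the integer values let version checks reduce to a map lookup and a comparison. A small illustrative sketch of that idea, assuming the comparison simply consults the map (not necessarily how IsBiggerPostgresVersion itself is implemented):

package main

import "fmt"

// illustrative copy of the lookup table from the hunk above
var versionMap = map[string]int{
    "11": 110000, "12": 120000, "13": 130000,
    "14": 140000, "15": 150000, "16": 160000,
}

// isBiggerVersion reports whether major version a is newer than b,
// using the numeric representation from the map.
func isBiggerVersion(a, b string) bool {
    return versionMap[a] > versionMap[b]
}

func main() {
    fmt.Println(isBiggerVersion("16", "12")) // true
    fmt.Println(isBiggerVersion("11", "15")) // false
}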
@@ -36,7 +35,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int {
 func (c *Cluster) GetDesiredMajorVersion() string {

     if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
-        // e.g. current is 10, minimal is 11 allowing 11 to 15 clusters, everything below is upgraded
+        // e.g. current is 12, minimal is 12 allowing 12 to 16 clusters, everything below is upgraded
         if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
             c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
             return c.Config.OpConfig.TargetMajorVersion

@@ -98,37 +97,43 @@ func (c *Cluster) majorVersionUpgrade() error {
         }
     }

+    // Recheck version with newest data from Patroni
+    if c.currentMajorVersion >= desiredVersion {
+        c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
+        return nil
+    }
+
     numberOfPods := len(pods)
     if allRunning && masterPod != nil {
         c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
         if c.currentMajorVersion < desiredVersion {
             podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}
             c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
-            c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
+            c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
             upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)

             c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
             resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
             if errIdCheck != nil {
-                c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "Checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)
+                c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)
             }

             resultIdCheck = strings.TrimSuffix(resultIdCheck, "\n")
             var result string
             if resultIdCheck != "0" {
-                c.logger.Infof("User id was identified as: %s, hence default user is non-root already", resultIdCheck)
+                c.logger.Infof("user id was identified as: %s, hence default user is non-root already", resultIdCheck)
                 result, err = c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand)
             } else {
-                c.logger.Infof("User id was identified as: %s, using su to reach the postgres user", resultIdCheck)
+                c.logger.Infof("user id was identified as: %s, using su to reach the postgres user", resultIdCheck)
                 result, err = c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand)
             }
             if err != nil {
-                c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "Upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err)
+                c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err)
                 return err
             }
             c.logger.Infof("upgrade action triggered and command completed: %s", result[:100])

-            c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion)
+            c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion)
         }
     }

@@ -7,6 +7,8 @@ import (
     "strconv"
     "time"
 
+    "golang.org/x/exp/slices"
+
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -503,7 +505,11 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
     } else {
         // in asynchronous mode find running replicas
         for _, member := range members {
-            if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && member.State == "running" {
+            if PostgresRole(member.Role) == Leader || PostgresRole(member.Role) == StandbyLeader {
+                continue
+            }
+
+            if slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
                 candidates = append(candidates, member)
             }
         }
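Note (editor's sketch, not part of the commit): the new candidate filter above first skips leader and standby-leader members and then accepts replicas in any of the states "running", "streaming" or "in archive recovery". Below is a minimal, runnable Go illustration of that selection, with a simplified member struct standing in for the operator's own Patroni member type.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// member is a simplified stand-in for a Patroni cluster member as returned by
// the /cluster endpoint; the operator's real type carries more fields.
type member struct {
	Name  string
	Role  string
	State string
	Lag   int
}

// switchoverCandidates mirrors the filtering idea from the hunk above:
// skip leaders first, then keep members in any of the healthy states.
func switchoverCandidates(members []member) []member {
	healthy := []string{"running", "streaming", "in archive recovery"}
	candidates := []member{}
	for _, m := range members {
		if m.Role == "leader" || m.Role == "standby_leader" {
			continue
		}
		if slices.Contains(healthy, m.State) {
			candidates = append(candidates, m)
		}
	}
	return candidates
}

func main() {
	members := []member{
		{Name: "c-0", Role: "leader", State: "running"},
		{Name: "c-1", Role: "replica", State: "streaming", Lag: 5},
		{Name: "c-2", Role: "replica", State: "in archive recovery", Lag: 2},
	}
	// both replicas qualify; lag-based ordering happens elsewhere
	fmt.Println(switchoverCandidates(members))
}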
@@ -3,7 +3,7 @@ package cluster
 import (
     "bytes"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"
     "testing"
     "time"
@@ -42,28 +42,28 @@ func TestGetSwitchoverCandidate(t *testing.T) {
     }{
         {
             subtest:           "choose sync_standby over replica",
-            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 0}]}`,
+            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 0}]}`,
             syncModeEnabled:   true,
             expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
             expectedError:     nil,
         },
         {
             subtest:           "no running sync_standby available",
-            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}]}`,
+            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}]}`,
             syncModeEnabled:   true,
             expectedCandidate: spec.NamespacedName{},
             expectedError:     fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"),
         },
         {
             subtest:           "choose replica with lowest lag",
-            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`,
+            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`,
             syncModeEnabled:   false,
             expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"},
             expectedError:     nil,
         },
         {
             subtest:           "choose first replica when lag is equal evrywhere",
-            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`,
+            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`,
             syncModeEnabled:   false,
             expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
             expectedError:     nil,
@@ -75,11 +75,18 @@ func TestGetSwitchoverCandidate(t *testing.T) {
             expectedCandidate: spec.NamespacedName{},
             expectedError:     fmt.Errorf("no switchover candidate found"),
         },
+        {
+            subtest:           "replicas with different status",
+            clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "in archive recovery", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`,
+            syncModeEnabled:   false,
+            expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"},
+            expectedError:     nil,
+        },
     }
 
     for _, tt := range tests {
         // mocking cluster members
-        r := ioutil.NopCloser(bytes.NewReader([]byte(tt.clusterJson)))
+        r := io.NopCloser(bytes.NewReader([]byte(tt.clusterJson)))
 
         response := http.Response{
             StatusCode: 200,
@@ -243,13 +243,17 @@ func (c *Cluster) deleteStatefulSet() error {
     c.setProcessName("deleting statefulset")
     c.logger.Debugln("deleting statefulset")
     if c.Statefulset == nil {
-        return fmt.Errorf("there is no statefulset in the cluster")
+        c.logger.Debug("there is no statefulset in the cluster")
+        return nil
     }
 
     err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(context.TODO(), c.Statefulset.Name, c.deleteOptions)
-    if err != nil {
+    if k8sutil.ResourceNotFound(err) {
+        c.logger.Debugf("statefulset %q has already been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta))
+    } else if err != nil {
         return err
     }
 
     c.logger.Infof("statefulset %q has been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta))
     c.Statefulset = nil
@@ -257,8 +261,12 @@ func (c *Cluster) deleteStatefulSet() error {
         return fmt.Errorf("could not delete pods: %v", err)
     }
 
-    if err := c.deletePersistentVolumeClaims(); err != nil {
-        return fmt.Errorf("could not delete PersistentVolumeClaims: %v", err)
+    if c.OpConfig.EnablePersistentVolumeClaimDeletion != nil && *c.OpConfig.EnablePersistentVolumeClaimDeletion {
+        if err := c.deletePersistentVolumeClaims(); err != nil {
+            return fmt.Errorf("could not delete PersistentVolumeClaims: %v", err)
+        }
+    } else {
+        c.logger.Info("not deleting PersistentVolumeClaims because disabled in configuration")
     }
 
     return nil
@@ -336,18 +344,21 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
 func (c *Cluster) deleteService(role PostgresRole) error {
     c.logger.Debugf("deleting service %s", role)
 
-    service, ok := c.Services[role]
-    if !ok {
+    if c.Services[role] == nil {
         c.logger.Debugf("No service for %s role was found, nothing to delete", role)
         return nil
     }
 
-    if err := c.KubeClient.Services(service.Namespace).Delete(context.TODO(), service.Name, c.deleteOptions); err != nil {
-        return err
+    if err := c.KubeClient.Services(c.Services[role].Namespace).Delete(context.TODO(), c.Services[role].Name, c.deleteOptions); err != nil {
+        if k8sutil.ResourceNotFound(err) {
+            c.logger.Debugf("%s service has already been deleted", role)
+        } else if err != nil {
+            return err
+        }
     }
 
-    c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(service.ObjectMeta))
-    c.Services[role] = nil
+    c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(c.Services[role].ObjectMeta))
+    delete(c.Services, role)
 
     return nil
 }
@@ -441,16 +452,20 @@ func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) e
 func (c *Cluster) deletePodDisruptionBudget() error {
     c.logger.Debug("deleting pod disruption budget")
     if c.PodDisruptionBudget == nil {
-        return fmt.Errorf("there is no pod disruption budget in the cluster")
+        c.logger.Debug("there is no pod disruption budget in the cluster")
+        return nil
     }
 
     pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)
     err := c.KubeClient.
         PodDisruptionBudgets(c.PodDisruptionBudget.Namespace).
         Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions)
-    if err != nil {
-        return fmt.Errorf("could not delete pod disruption budget: %v", err)
+    if k8sutil.ResourceNotFound(err) {
+        c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
+    } else if err != nil {
+        return fmt.Errorf("could not delete PodDisruptionBudget: %v", err)
     }
 
     c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
     c.PodDisruptionBudget = nil
@@ -476,17 +491,20 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
     c.setProcessName("deleting endpoint")
     c.logger.Debugln("deleting endpoint")
     if c.Endpoints[role] == nil {
-        return fmt.Errorf("there is no %s endpoint in the cluster", role)
+        c.logger.Debugf("there is no %s endpoint in the cluster", role)
+        return nil
     }
 
-    if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(
-        context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil {
-        return fmt.Errorf("could not delete endpoint: %v", err)
+    if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil {
+        if k8sutil.ResourceNotFound(err) {
+            c.logger.Debugf("%s endpoint has already been deleted", role)
+        } else if err != nil {
+            return fmt.Errorf("could not delete endpoint: %v", err)
+        }
     }
 
-    c.logger.Infof("endpoint %q has been deleted", util.NameFromMeta(c.Endpoints[role].ObjectMeta))
-    c.Endpoints[role] = nil
+    c.logger.Infof("%s endpoint %q has been deleted", role, util.NameFromMeta(c.Endpoints[role].ObjectMeta))
+    delete(c.Endpoints, role)
 
     return nil
 }
@@ -514,7 +532,9 @@ func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error {
     secretName := util.NameFromMeta(secret.ObjectMeta)
     c.logger.Debugf("deleting secret %q", secretName)
     err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
-    if err != nil {
+    if k8sutil.ResourceNotFound(err) {
+        c.logger.Debugf("secret %q has already been deleted", secretName)
+    } else if err != nil {
         return fmt.Errorf("could not delete secret %q: %v", secretName, err)
     }
     c.logger.Infof("secret %q has been deleted", secretName)
@@ -573,7 +593,14 @@ func (c *Cluster) deleteLogicalBackupJob() error {
 
     c.logger.Info("removing the logical backup job")
 
-    return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions)
+    err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions)
+    if k8sutil.ResourceNotFound(err) {
+        c.logger.Debugf("logical backup cron job %q has already been deleted", c.getLogicalBackupJobName())
+    } else if err != nil {
+        return err
+    }
+
+    return nil
 }
 
 // GetServiceMaster returns cluster's kubernetes master Service
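Note (editor's sketch, not part of the commit): the delete functions above all move to the same idempotent pattern: a "not found" error is logged and swallowed instead of failing the sync, and a missing in-memory object is no longer treated as an error. The sketch below shows that idea with a plain client-go call and the upstream apierrors.IsNotFound helper; the operator's own k8sutil.ResourceNotFound wrapper is assumed here to behave equivalently.

package sketch

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteServiceIdempotent deletes a Service but treats "already gone" as
// success, so a repeated cluster sync does not fail on missing resources.
func deleteServiceIdempotent(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	err := client.CoreV1().Services(namespace).Delete(ctx, name, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		fmt.Printf("service %s/%s has already been deleted\n", namespace, name)
		return nil
	}
	if err != nil {
		return fmt.Errorf("could not delete service %s/%s: %v", namespace, name, err)
	}
	fmt.Printf("service %s/%s has been deleted\n", namespace, name)
	return nil
}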
@@ -2,6 +2,7 @@ package cluster
 
 import (
     "context"
+    "encoding/json"
     "fmt"
     "reflect"
     "sort"
@@ -13,6 +14,7 @@ import (
     "github.com/zalando/postgres-operator/pkg/util/constants"
     "github.com/zalando/postgres-operator/pkg/util/k8sutil"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
 )
 
 func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, error) {
@@ -29,8 +31,12 @@ func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, err
 
 func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) error {
     c.setProcessName("updating event streams")
-    if _, err := c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Update(context.TODO(), newEventStreams, metav1.UpdateOptions{}); err != nil {
+    patch, err := json.Marshal(newEventStreams)
+    if err != nil {
+        return fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err)
+    }
+    if _, err := c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch(
+        context.TODO(), newEventStreams.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
         return err
     }
 
@@ -145,11 +151,13 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
             streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn)
             streamFlow := getEventStreamFlow(stream, table.PayloadColumn)
             streamSink := getEventStreamSink(stream, table.EventType)
+            streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType)
 
             eventStreams = append(eventStreams, zalandov1.EventStream{
                 EventStreamFlow: streamFlow,
-                EventStreamSink: streamSink,
-                EventStreamSource: streamSource})
+                EventStreamRecovery: streamRecovery,
+                EventStreamSink: streamSink,
+                EventStreamSource: streamSource})
         }
     }
@@ -204,6 +212,28 @@ func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1.EventS
     }
 }
 
+func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery {
+    if (stream.EnableRecovery != nil && !*stream.EnableRecovery) ||
+        (stream.EnableRecovery == nil && recoveryEventType == "") {
+        return zalandov1.EventStreamRecovery{
+            Type: constants.EventStreamRecoveryNoneType,
+        }
+    }
+
+    if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" {
+        recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix)
+    }
+
+    return zalandov1.EventStreamRecovery{
+        Type: constants.EventStreamRecoveryDLQType,
+        Sink: &zalandov1.EventStreamSink{
+            Type:         constants.EventStreamSinkNakadiType,
+            EventType:    recoveryEventType,
+            MaxBatchSize: stream.BatchSize,
+        },
+    }
+}
+
 func getTableSchema(fullTableName string) (tableName, schemaName string) {
     schemaName = "public"
     tableName = fullTableName
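Note (editor's sketch, not part of the commit): getEventStreamRecovery above chooses between no recovery and a dead-letter-queue sink, deriving the recovery event type from the stream's event type plus a suffix when none is set explicitly. The condensed illustration below uses simplified placeholder types and constant strings; the real CRD structs and constant values live in the operator's zalandov1 and constants packages.

package sketch

import "fmt"

// recovery is a trimmed-down stand-in for the EventStreamRecovery CRD type.
type recovery struct {
	Type      string
	EventType string
}

// recoveryFor mirrors the branching above: recovery can be disabled explicitly
// or implicitly (no flag, no recovery event type); otherwise a DLQ sink is
// configured, with "<eventType>-<suffix>" as fallback event type.
func recoveryFor(enable *bool, recoveryEventType, eventType, dlqSuffix string) recovery {
	if (enable != nil && !*enable) || (enable == nil && recoveryEventType == "") {
		return recovery{Type: "NONE"}
	}
	if enable != nil && *enable && recoveryEventType == "" {
		recoveryEventType = fmt.Sprintf("%s-%s", eventType, dlqSuffix)
	}
	return recovery{Type: "DLQ", EventType: recoveryEventType}
}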
@@ -381,7 +411,8 @@ func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (matc
         for _, curStream := range curEventStreams {
             if reflect.DeepEqual(newStream.EventStreamSource, curStream.EventStreamSource) &&
                 reflect.DeepEqual(newStream.EventStreamFlow, curStream.EventStreamFlow) &&
-                reflect.DeepEqual(newStream.EventStreamSink, curStream.EventStreamSink) {
+                reflect.DeepEqual(newStream.EventStreamSink, curStream.EventStreamSink) &&
+                reflect.DeepEqual(newStream.EventStreamRecovery, curStream.EventStreamRecovery) {
                 match = true
                 break
             }
@@ -65,9 +65,11 @@ var (
                     PayloadColumn: k8sutil.StringToPointer("b_payload"),
                 },
                 "data.foobar": acidv1.StreamTable{
                     EventType: "stream-type-b",
+                    RecoveryEventType: "stream-type-b-dlq",
                 },
             },
+            EnableRecovery: util.True(),
             Filter: map[string]*string{
                 "data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
             },
@@ -106,6 +108,14 @@ var (
                 PayloadColumn: k8sutil.StringToPointer("b_payload"),
                 Type:          constants.EventStreamFlowPgGenericType,
             },
+            EventStreamRecovery: zalandov1.EventStreamRecovery{
+                Type: constants.EventStreamRecoveryDLQType,
+                Sink: &zalandov1.EventStreamSink{
+                    EventType:    fmt.Sprintf("%s-%s", "stream-type-a", constants.EventStreamRecoverySuffix),
+                    MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
+                    Type:         constants.EventStreamSinkNakadiType,
+                },
+            },
             EventStreamSink: zalandov1.EventStreamSink{
                 EventType:    "stream-type-a",
                 MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
@@ -136,6 +146,14 @@ var (
             EventStreamFlow: zalandov1.EventStreamFlow{
                 Type: constants.EventStreamFlowPgGenericType,
             },
+            EventStreamRecovery: zalandov1.EventStreamRecovery{
+                Type: constants.EventStreamRecoveryDLQType,
+                Sink: &zalandov1.EventStreamSink{
+                    EventType:    "stream-type-b-dlq",
+                    MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
+                    Type:         constants.EventStreamSinkNakadiType,
+                },
+            },
             EventStreamSink: zalandov1.EventStreamSink{
                 EventType:    "stream-type-b",
                 MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
@@ -251,7 +269,8 @@ func TestSameStreams(t *testing.T) {
     testName := "TestSameStreams"
 
     stream1 := zalandov1.EventStream{
         EventStreamFlow: zalandov1.EventStreamFlow{},
+        EventStreamRecovery: zalandov1.EventStreamRecovery{},
         EventStreamSink: zalandov1.EventStreamSink{
             EventType: "stream-type-a",
         },
@@ -263,7 +282,23 @@ func TestSameStreams(t *testing.T) {
     }
 
     stream2 := zalandov1.EventStream{
+        EventStreamFlow: zalandov1.EventStreamFlow{},
+        EventStreamRecovery: zalandov1.EventStreamRecovery{},
+        EventStreamSink: zalandov1.EventStreamSink{
+            EventType: "stream-type-b",
+        },
+        EventStreamSource: zalandov1.EventStreamSource{
+            EventStreamTable: zalandov1.EventStreamTable{
+                Name: "bar",
+            },
+        },
+    }
+
+    stream3 := zalandov1.EventStream{
         EventStreamFlow: zalandov1.EventStreamFlow{},
+        EventStreamRecovery: zalandov1.EventStreamRecovery{
+            Type: constants.EventStreamRecoveryNoneType,
+        },
         EventStreamSink: zalandov1.EventStreamSink{
             EventType: "stream-type-b",
         },
@@ -316,6 +351,13 @@ func TestSameStreams(t *testing.T) {
             match:    false,
             reason:   "number of defined streams is different",
         },
+        {
+            subTest:  "event stream recovery specs differ",
+            streamsA: []zalandov1.EventStream{stream2},
+            streamsB: []zalandov1.EventStream{stream3},
+            match:    false,
+            reason:   "event stream specs differ",
+        },
     }
 
     for _, tt := range tests {
@@ -389,6 +431,28 @@ func TestUpdateFabricEventStream(t *testing.T) {
     result := cluster.generateFabricEventStream(appId)
     if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
-        t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", streams.Items[0], result)
+        t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
+    }
+
+    // disable recovery
+    for _, stream := range pg.Spec.Streams {
+        if stream.ApplicationId == appId {
+            stream.EnableRecovery = util.False()
+        }
+    }
+    patchData, err = specPatch(pg.Spec)
+    assert.NoError(t, err)
+
+    pgPatched, err = cluster.KubeClient.Postgresqls(namespace).Patch(
+        context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
+    assert.NoError(t, err)
+
+    cluster.Postgresql.Spec = pgPatched.Spec
+    err = cluster.createOrUpdateStreams()
+    assert.NoError(t, err)
+
+    result = cluster.generateFabricEventStream(appId)
+    if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
+        t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
     }
 }
@@ -15,6 +15,7 @@ import (
     "github.com/zalando/postgres-operator/pkg/util"
     "github.com/zalando/postgres-operator/pkg/util/constants"
     "github.com/zalando/postgres-operator/pkg/util/k8sutil"
+    "golang.org/x/exp/slices"
     batchv1 "k8s.io/api/batch/v1"
     v1 "k8s.io/api/core/v1"
     policyv1 "k8s.io/api/policy/v1"
@@ -40,14 +41,28 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
     c.setSpec(newSpec)
 
     defer func() {
+        var (
+            pgUpdatedStatus *acidv1.Postgresql
+            errStatus       error
+        )
         if err != nil {
             c.logger.Warningf("error while syncing cluster state: %v", err)
-            c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed)
+            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed)
         } else if !c.Status.Running() {
-            c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+        }
+        if errStatus != nil {
+            c.logger.Warningf("could not set cluster status: %v", errStatus)
+        }
+        if pgUpdatedStatus != nil {
+            c.setSpec(pgUpdatedStatus)
         }
     }()
 
+    if err = c.syncFinalizer(); err != nil {
+        c.logger.Debugf("could not sync finalizers: %v", err)
+    }
+
     if err = c.initUsers(); err != nil {
         err = fmt.Errorf("could not init users: %v", err)
         return err
@@ -84,6 +99,13 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
         }
     }
 
+    // add or remove standby_cluster section from Patroni config depending on changes in standby section
+    if !reflect.DeepEqual(oldSpec.Spec.StandbyCluster, newSpec.Spec.StandbyCluster) {
+        if err := c.syncStandbyClusterConfiguration(); err != nil {
+            return fmt.Errorf("could not sync StandbyCluster configuration: %v", err)
+        }
+    }
+
     c.logger.Debug("syncing pod disruption budgets")
     if err = c.syncPodDisruptionBudget(false); err != nil {
         err = fmt.Errorf("could not sync pod disruption budget: %v", err)
@@ -137,6 +159,20 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
     return err
 }
 
+func (c *Cluster) syncFinalizer() error {
+    var err error
+    if c.OpConfig.EnableFinalizers != nil && *c.OpConfig.EnableFinalizers {
+        err = c.addFinalizer()
+    } else {
+        err = c.removeFinalizer()
+    }
+    if err != nil {
+        return fmt.Errorf("could not sync finalizer: %v", err)
+    }
+
+    return nil
+}
+
 func (c *Cluster) syncServices() error {
     for _, role := range []PostgresRole{Master, Replica} {
         c.logger.Debugf("syncing %s service", role)
@@ -610,6 +646,9 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
     if desiredPatroniConfig.SynchronousModeStrict != effectivePatroniConfig.SynchronousModeStrict {
         configToSet["synchronous_mode_strict"] = desiredPatroniConfig.SynchronousModeStrict
     }
+    if desiredPatroniConfig.SynchronousNodeCount != effectivePatroniConfig.SynchronousNodeCount {
+        configToSet["synchronous_node_count"] = desiredPatroniConfig.SynchronousNodeCount
+    }
     if desiredPatroniConfig.TTL > 0 && desiredPatroniConfig.TTL != effectivePatroniConfig.TTL {
         configToSet["ttl"] = desiredPatroniConfig.TTL
     }
@@ -664,7 +703,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
         effectiveValue := effectivePgParameters[desiredOption]
         if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) {
             parametersToSet[desiredOption] = desiredValue
-            if util.SliceContains(requirePrimaryRestartWhenDecreased, desiredOption) {
+            if slices.Contains(requirePrimaryRestartWhenDecreased, desiredOption) {
                 effectiveValueNum, errConv := strconv.Atoi(effectiveValue)
                 desiredValueNum, errConv2 := strconv.Atoi(desiredValue)
                 if errConv != nil || errConv2 != nil {
@@ -680,7 +719,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
     }
 
     // check if there exist only config updates that require a restart of the primary
-    if len(restartPrimary) > 0 && !util.SliceContains(restartPrimary, false) && len(configToSet) == 0 {
+    if len(restartPrimary) > 0 && !slices.Contains(restartPrimary, false) && len(configToSet) == 0 {
         requiresMasterRestart = true
     }
 
@@ -710,6 +749,46 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
     return configPatched, requiresMasterRestart, nil
 }
 
+// syncStandbyClusterConfiguration checks whether standby cluster
+// parameters have changed and if necessary sets it via the Patroni API
+func (c *Cluster) syncStandbyClusterConfiguration() error {
+    var (
+        err  error
+        pods []v1.Pod
+    )
+
+    standbyOptionsToSet := make(map[string]interface{})
+    if c.Spec.StandbyCluster != nil {
+        c.logger.Infof("turning %q into a standby cluster", c.Name)
+        standbyOptionsToSet["create_replica_methods"] = []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"}
+        standbyOptionsToSet["restore_command"] = "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""
+
+    } else {
+        c.logger.Infof("promoting standby cluster and detach from source")
+        standbyOptionsToSet = nil
+    }
+
+    if pods, err = c.listPods(); err != nil {
+        return err
+    }
+    if len(pods) == 0 {
+        return fmt.Errorf("could not call Patroni API: cluster has no pods")
+    }
+    // try all pods until the first one that is successful, as it doesn't matter which pod
+    // carries the request to change configuration through
+    for _, pod := range pods {
+        podName := util.NameFromMeta(pod.ObjectMeta)
+        c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
+            podName, standbyOptionsToSet)
+        if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil {
+            return nil
+        }
+        c.logger.Warningf("could not patch postgres parameters within pod %s: %v", podName, err)
+    }
+    return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
+        len(pods))
+}
+
 func (c *Cluster) syncSecrets() error {
     c.logger.Info("syncing secrets")
     c.setProcessName("syncing secrets")
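Note (editor's sketch, not part of the commit): the options map built in syncStandbyClusterConfiguration corresponds to a standby_cluster section in the Patroni configuration, which is also what the standbyJson fixture in the test further below expects. The snippet only illustrates the payload shape; the real request is sent through the operator's patroni client, not built by hand like this.

package sketch

import (
	"encoding/json"
	"fmt"
)

// ExampleStandbyClusterPayload prints the JSON body shape that ends up in
// Patroni's standby_cluster configuration section.
func ExampleStandbyClusterPayload() {
	standbyOptions := map[string]interface{}{
		"create_replica_methods": []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"},
		"restore_command":        `envdir "/run/etc/wal-e.d/env-standby" /scripts/restore_command.sh "%f" "%p"`,
	}
	body, _ := json.Marshal(map[string]interface{}{"standby_cluster": standbyOptions})
	fmt.Println(string(body))
}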
@@ -808,14 +887,17 @@ func (c *Cluster) updateSecret(
     // if password rotation is enabled update password and username if rotation interval has been passed
     // rotation can be enabled globally or via the manifest (excluding the Postgres superuser)
     rotationEnabledInManifest := secretUsername != constants.SuperuserKeyName &&
-        (util.SliceContains(c.Spec.UsersWithSecretRotation, secretUsername) ||
-            util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername))
+        (slices.Contains(c.Spec.UsersWithSecretRotation, secretUsername) ||
+            slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername))
 
     // globally enabled rotation is only allowed for manifest and bootstrapped roles
     allowedRoleTypes := []spec.RoleOrigin{spec.RoleOriginManifest, spec.RoleOriginBootstrap}
-    rotationAllowed := !pwdUser.IsDbOwner && util.SliceContains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil
+    rotationAllowed := !pwdUser.IsDbOwner && slices.Contains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil
 
-    if (c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest {
+    // users can ignore any kind of rotation
+    isIgnoringRotation := slices.Contains(c.Spec.UsersIgnoringSecretRotation, secretUsername)
+
+    if ((c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest) && !isIgnoringRotation {
         updateSecretMsg, err = c.rotatePasswordInSecret(secret, secretUsername, pwdUser.Origin, currentTime, retentionUsers)
         if err != nil {
             c.logger.Warnf("password rotation failed for user %s: %v", secretUsername, err)
@@ -873,6 +955,8 @@ func (c *Cluster) rotatePasswordInSecret(
         err                 error
         nextRotationDate    time.Time
         nextRotationDateStr string
+        expectedUsername    string
+        rotationModeChanged bool
         updateSecretMsg     string
     )
 
@@ -893,17 +977,32 @@ func (c *Cluster) rotatePasswordInSecret(
         nextRotationDate = currentRotationDate
     }
 
+    // set username and check if it differs from current value in secret
+    currentUsername := string(secret.Data["username"])
+    if !slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
+        expectedUsername = fmt.Sprintf("%s%s", secretUsername, currentTime.Format(constants.RotationUserDateFormat))
+    } else {
+        expectedUsername = secretUsername
+    }
+
+    // when changing to in-place rotation update secret immediatly
+    // if currentUsername is longer we know it has a date suffix
+    // the other way around we can wait until the next rotation date
+    if len(currentUsername) > len(expectedUsername) {
+        rotationModeChanged = true
+        c.logger.Infof("updating secret %s after switching to in-place rotation mode for username: %s", secretName, string(secret.Data["username"]))
+    }
+
     // update password and next rotation date if configured interval has passed
-    if currentTime.After(nextRotationDate) {
+    if currentTime.After(nextRotationDate) || rotationModeChanged {
         // create rotation user if role is not listed for in-place password update
-        if !util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
-            rotationUsername := fmt.Sprintf("%s%s", secretUsername, currentTime.Format(constants.RotationUserDateFormat))
-            secret.Data["username"] = []byte(rotationUsername)
-            c.logger.Infof("updating username in secret %s and creating rotation user %s in the database", secretName, rotationUsername)
+        if !slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
+            secret.Data["username"] = []byte(expectedUsername)
+            c.logger.Infof("updating username in secret %s and creating rotation user %s in the database", secretName, expectedUsername)
             // whenever there is a rotation, check if old rotation users can be deleted
             *retentionUsers = append(*retentionUsers, secretUsername)
         } else {
-            // when passwords of system users are rotated in place, pods have to be replaced
+            // when passwords of system users are rotated in-place, pods have to be replaced
             if roleOrigin == spec.RoleOriginSystem {
                 pods, err := c.listPods()
                 if err != nil {
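Note (editor's sketch, not part of the commit): the username handling added above boils down to two checks, sketched here with a hypothetical "060102" date layout standing in for constants.RotationUserDateFormat, whose real value is defined in the operator's constants package.

package sketch

import (
	"fmt"
	"time"
)

// expectedSecretUsername sketches the username choice made above: in-place
// rotation keeps the original role name, otherwise a date-suffixed rotation
// user is expected in the secret.
func expectedSecretUsername(secretUsername string, inPlace bool, now time.Time) string {
	if inPlace {
		return secretUsername
	}
	// "060102" is an assumed layout for illustration only
	return fmt.Sprintf("%s%s", secretUsername, now.Format("060102"))
}

// rotationModeChanged mirrors the length comparison above: a stored username
// longer than the expected one still carries a date suffix, i.e. the user was
// just switched to in-place rotation and the secret must be updated now.
func rotationModeChanged(currentUsername, expectedUsername string) bool {
	return len(currentUsername) > len(expectedUsername)
}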
@@ -917,7 +1016,7 @@ func (c *Cluster) rotatePasswordInSecret(
                 }
             }
 
-            // when password of connection pooler is rotated in place, pooler pods have to be replaced
+            // when password of connection pooler is rotated in-place, pooler pods have to be replaced
             if roleOrigin == spec.RoleOriginConnectionPooler {
                 listOptions := metav1.ListOptions{
                     LabelSelector: c.poolerLabelsSet(true).String(),
@@ -934,10 +1033,12 @@ func (c *Cluster) rotatePasswordInSecret(
                 }
             }
 
-            // when password of stream user is rotated in place, it should trigger rolling update in FES deployment
+            // when password of stream user is rotated in-place, it should trigger rolling update in FES deployment
             if roleOrigin == spec.RoleOriginStream {
                 c.logger.Warnf("password in secret of stream user %s changed", constants.EventStreamSourceSlotPrefix+constants.UserRoleNameSuffix)
             }
+
+            secret.Data["username"] = []byte(secretUsername)
         }
         secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
         secret.Data["nextRotation"] = []byte(nextRotationDateStr)
@@ -2,13 +2,15 @@ package cluster
 
 import (
     "bytes"
-    "io/ioutil"
+    "fmt"
+    "io"
     "net/http"
     "testing"
     "time"
 
     "context"
 
+    "golang.org/x/exp/slices"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
@@ -200,7 +202,7 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
 
     // mocking a config after setConfig is called
     configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}`
-    r := ioutil.NopCloser(bytes.NewReader([]byte(configJson)))
+    r := io.NopCloser(bytes.NewReader([]byte(configJson)))
 
     response := http.Response{
         StatusCode: 200,
@@ -480,6 +482,140 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
     }
 }
 
+func TestSyncStandbyClusterConfiguration(t *testing.T) {
+    client, _ := newFakeK8sSyncClient()
+    clusterName := "acid-standby-cluster"
+    applicationLabel := "spilo"
+    namespace := "default"
+
+    ctrl := gomock.NewController(t)
+    defer ctrl.Finish()
+
+    pg := acidv1.Postgresql{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      clusterName,
+            Namespace: namespace,
+        },
+        Spec: acidv1.PostgresSpec{
+            NumberOfInstances: int32(1),
+            Volume: acidv1.Volume{
+                Size: "1Gi",
+            },
+        },
+    }
+
+    var cluster = New(
+        Config{
+            OpConfig: config.Config{
+                PatroniAPICheckInterval: time.Duration(1),
+                PatroniAPICheckTimeout:  time.Duration(5),
+                PodManagementPolicy:     "ordered_ready",
+                Resources: config.Resources{
+                    ClusterLabels:         map[string]string{"application": applicationLabel},
+                    ClusterNameLabel:      "cluster-name",
+                    DefaultCPURequest:     "300m",
+                    DefaultCPULimit:       "300m",
+                    DefaultMemoryRequest:  "300Mi",
+                    DefaultMemoryLimit:    "300Mi",
+                    MinInstances:          int32(-1),
+                    MaxInstances:          int32(-1),
+                    PodRoleLabel:          "spilo-role",
+                    ResourceCheckInterval: time.Duration(3),
+                    ResourceCheckTimeout:  time.Duration(10),
+                },
+            },
+        }, client, pg, logger, eventRecorder)
+
+    cluster.Name = clusterName
+    cluster.Namespace = namespace
+
+    // mocking a config after getConfig is called
+    mockClient := mocks.NewMockHTTPClient(ctrl)
+    configJson := `{"ttl": 20}`
+    r := io.NopCloser(bytes.NewReader([]byte(configJson)))
+    response := http.Response{
+        StatusCode: 200,
+        Body:       r,
+    }
+    mockClient.EXPECT().Get(gomock.Any()).Return(&response, nil).AnyTimes()
+
+    // mocking a config after setConfig is called
+    standbyJson := `{"standby_cluster":{"create_replica_methods":["bootstrap_standby_with_wale","basebackup_fast_xlog"],"restore_command":"envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""}}`
+    r = io.NopCloser(bytes.NewReader([]byte(standbyJson)))
+    response = http.Response{
+        StatusCode: 200,
+        Body:       r,
+    }
+    mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes()
+    p := patroni.New(patroniLogger, mockClient)
+    cluster.patroni = p
+
+    mockPod := newMockPod("192.168.100.1")
+    mockPod.Name = fmt.Sprintf("%s-0", clusterName)
+    mockPod.Namespace = namespace
+    podLabels := map[string]string{
+        "cluster-name": clusterName,
+        "application":  applicationLabel,
+        "spilo-role":   "master",
+    }
+    mockPod.Labels = podLabels
+    client.PodsGetter.Pods(namespace).Create(context.TODO(), mockPod, metav1.CreateOptions{})
+
+    // create a statefulset
+    sts, err := cluster.createStatefulSet()
+    assert.NoError(t, err)
+
+    // check that pods do not have a STANDBY_* environment variable
+    assert.NotContains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+    // add standby section
+    cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
+        S3WalPath: "s3://custom/path/to/bucket/",
+    }
+    cluster.syncStatefulSet()
+    updatedSts := cluster.Statefulset
+
+    // check that pods do not have a STANDBY_* environment variable
+    assert.Contains(t, updatedSts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+    // this should update the Patroni config
+    err = cluster.syncStandbyClusterConfiguration()
+    assert.NoError(t, err)
+
+    configJson = `{"standby_cluster":{"create_replica_methods":["bootstrap_standby_with_wale","basebackup_fast_xlog"],"restore_command":"envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""}, "ttl": 20}`
+    r = io.NopCloser(bytes.NewReader([]byte(configJson)))
+    response = http.Response{
+        StatusCode: 200,
+        Body:       r,
+    }
+    mockClient.EXPECT().Get(gomock.Any()).Return(&response, nil).AnyTimes()
+
+    pods, err := cluster.listPods()
+    assert.NoError(t, err)
+
+    _, _, err = cluster.patroni.GetConfig(&pods[0])
+    assert.NoError(t, err)
+    // ToDo extend GetConfig to return standy_cluster setting to compare
+    /*
+        defaultStandbyParameters := map[string]interface{}{
+            "create_replica_methods": []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"},
+            "restore_command":        "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\"",
+        }
+        assert.True(t, reflect.DeepEqual(defaultStandbyParameters, standbyCluster))
+    */
+    // remove standby section
+    cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{}
+    cluster.syncStatefulSet()
+    updatedSts2 := cluster.Statefulset
+
+    // check that pods do not have a STANDBY_* environment variable
+    assert.NotContains(t, updatedSts2.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+    // this should update the Patroni config again
+    err = cluster.syncStandbyClusterConfiguration()
+    assert.NoError(t, err)
+}
+
 func TestUpdateSecret(t *testing.T) {
     testName := "test syncing secrets"
     client, _ := newFakeK8sSyncSecretsClient()
@@ -488,6 +624,7 @@ func TestUpdateSecret(t *testing.T) {
     namespace := "default"
     dbname := "app"
     dbowner := "appowner"
+    appUser := "foo"
     secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
     retentionUsers := make([]string, 0)
@@ -499,7 +636,8 @@ func TestUpdateSecret(t *testing.T) {
         },
         Spec: acidv1.PostgresSpec{
             Databases: map[string]string{dbname: dbowner},
-            Users: map[string]acidv1.UserFlags{"foo": {}, dbowner: {}},
+            Users: map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}},
+            UsersIgnoringSecretRotation: []string{"bar"},
             UsersWithInPlaceSecretRotation: []string{dbowner},
             Streams: []acidv1.Stream{
                 {
@@ -577,6 +715,9 @@ func TestUpdateSecret(t *testing.T) {
         if pgUser.Origin != spec.RoleOriginManifest {
             continue
         }
+        if slices.Contains(pg.Spec.UsersIgnoringSecretRotation, username) {
+            continue
+        }
         t.Errorf("%s: password unchanged in updated secret for %s", testName, username)
     }
 
@@ -604,4 +745,32 @@ func TestUpdateSecret(t *testing.T) {
             }
         }
     }
+
+    // switch rotation for foo to in-place
+    inPlaceRotationUsers := []string{dbowner, appUser}
+    cluster.Spec.UsersWithInPlaceSecretRotation = inPlaceRotationUsers
+    cluster.initUsers()
+    cluster.syncSecrets()
+    updatedSecret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), cluster.credentialSecretName(appUser), metav1.GetOptions{})
+    assert.NoError(t, err)
+
+    // username in secret should be switched to original user
+    currentUsername := string(updatedSecret.Data["username"])
+    if currentUsername != appUser {
+        t.Errorf("%s: updated secret does not contain correct username: expected %s, got %s", testName, appUser, currentUsername)
+    }
+
+    // switch rotation back to rotation user
+    inPlaceRotationUsers = []string{dbowner}
+    cluster.Spec.UsersWithInPlaceSecretRotation = inPlaceRotationUsers
+    cluster.initUsers()
+    cluster.syncSecrets()
+    updatedSecret, err = cluster.KubeClient.Secrets(namespace).Get(context.TODO(), cluster.credentialSecretName(appUser), metav1.GetOptions{})
+    assert.NoError(t, err)
+
+    // username in secret will only be switched after next rotation date is passed
+    currentUsername = string(updatedSecret.Data["username"])
+    if currentUsername != appUser {
+        t.Errorf("%s: updated secret does not contain expected username: expected %s, got %s", testName, appUser, currentUsername)
+    }
 }
@@ -78,7 +78,14 @@ func (c *Cluster) isProtectedUsername(username string) bool {
 }
 
 func (c *Cluster) isSystemUsername(username string) bool {
-	return (username == c.OpConfig.SuperUsername || username == c.OpConfig.ReplicationUsername)
+	// is there a pooler system user defined
+	for _, systemUser := range c.systemUsers {
+		if username == systemUser.Name {
+			return true
+		}
+	}
+
+	return false
 }
 
 func isValidFlag(flag string) bool {
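For illustration (not taken from the diff): the rewritten check above walks the cluster's full list of system users, which per the in-diff comment can include a connection pooler user, instead of comparing only against the configured superuser and replication user names. A minimal, self-contained sketch of the same lookup pattern, using hypothetical local types rather than the operator's own:

    package main

    import "fmt"

    // pgUser stands in for the operator's system user entries (hypothetical type).
    type pgUser struct {
        Name string
    }

    // isSystemUsername reports whether name matches any known system user,
    // mirroring the loop introduced above.
    func isSystemUsername(systemUsers []pgUser, name string) bool {
        for _, u := range systemUsers {
            if u.Name == name {
                return true
            }
        }
        return false
    }

    func main() {
        users := []pgUser{{Name: "postgres"}, {Name: "standby"}, {Name: "pooler"}}
        fmt.Println(isSystemUsername(users, "pooler")) // true
        fmt.Println(isSystemUsername(users, "app"))    // false
    }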
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
 	result.EtcdHost = fromCRD.EtcdHost
 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-15:3.0-p1")
+	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.2-p2")
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
@@ -62,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	// major version upgrade config
 	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off")
 	result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
-	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "11")
-	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "15")
+	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12")
+	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16")
 
 	// kubernetes config
 	result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations
@@ -82,6 +82,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
 	result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
 	result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
+	result.PDBMasterLabelSelector = util.CoalesceBool(fromCRD.Kubernetes.PDBMasterLabelSelector, util.True())
 	result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True())
 	result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc")
 	result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
@@ -90,6 +91,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
 	result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
 	result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret
+	result.EnableFinalizers = util.CoalesceBool(fromCRD.Kubernetes.EnableFinalizers, util.False())
 
 	result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
 	if fromCRD.Kubernetes.InfrastructureRolesDefs != nil {
@@ -119,6 +121,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.NodeReadinessLabelMerge = fromCRD.Kubernetes.NodeReadinessLabelMerge
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready")
+	result.PersistentVolumeClaimRetentionPolicy = fromCRD.Kubernetes.PersistentVolumeClaimRetentionPolicy
+	result.EnablePersistentVolumeClaimDeletion = util.CoalesceBool(fromCRD.Kubernetes.EnablePersistentVolumeClaimDeletion, util.True())
 	result.EnableReadinessProbe = fromCRD.Kubernetes.EnableReadinessProbe
 	result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m")
 	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
@@ -127,12 +131,12 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.PodToleration = fromCRD.Kubernetes.PodToleration
 
 	// Postgres Pod resources
-	result.DefaultCPURequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPURequest, "100m")
-	result.DefaultMemoryRequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryRequest, "100Mi")
-	result.DefaultCPULimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPULimit, "1")
-	result.DefaultMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryLimit, "500Mi")
-	result.MinCPULimit = util.Coalesce(fromCRD.PostgresPodResources.MinCPULimit, "250m")
-	result.MinMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.MinMemoryLimit, "250Mi")
+	result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest
+	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
+	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
+	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
+	result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit
+	result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit
 	result.MaxCPURequest = fromCRD.PostgresPodResources.MaxCPURequest
 	result.MaxMemoryRequest = fromCRD.PostgresPodResources.MaxMemoryRequest
 
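For illustration (not taken from the diff): with the util.Coalesce calls removed above, empty resource fields from the OperatorConfiguration are now copied through as-is instead of being replaced by hard-coded defaults at import time. A minimal sketch of the coalesce semantics being dropped, assuming util.Coalesce returns the fallback only when the first value is empty; the local coalesce below is a stand-in, not the operator's util package:

    package main

    import "fmt"

    // coalesce mimics the assumed behavior of util.Coalesce: keep val unless it is empty.
    func coalesce(val, def string) string {
        if val == "" {
            return def
        }
        return val
    }

    func main() {
        fromCRD := "" // resource field left empty in the CRD configuration

        // old behavior: the empty value silently became a hard-coded default
        fmt.Println(coalesce(fromCRD, "100m")) // "100m"

        // new behavior: the empty value is passed through and handled downstream
        fmt.Println(fromCRD == "") // true
    }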
@@ -174,7 +178,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 
 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.10.1")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.11.0")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
@@ -188,6 +192,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.LogicalBackupS3RetentionTime = fromCRD.LogicalBackup.RetentionTime
 	result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials
 	result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-")
+	result.LogicalBackupCronjobEnvironmentSecret = fromCRD.LogicalBackup.CronjobEnvironmentSecret
 	result.LogicalBackupCPURequest = fromCRD.LogicalBackup.CPURequest
 	result.LogicalBackupMemoryRequest = fromCRD.LogicalBackup.MemoryRequest
 	result.LogicalBackupCPULimit = fromCRD.LogicalBackup.CPULimit
@@ -261,21 +266,10 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 		fromCRD.ConnectionPooler.Mode,
 		constants.ConnectionPoolerDefaultMode)
 
-	result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = util.Coalesce(
-		fromCRD.ConnectionPooler.DefaultCPURequest,
-		constants.ConnectionPoolerDefaultCpuRequest)
-
-	result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = util.Coalesce(
-		fromCRD.ConnectionPooler.DefaultMemoryRequest,
-		constants.ConnectionPoolerDefaultMemoryRequest)
-
-	result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = util.Coalesce(
-		fromCRD.ConnectionPooler.DefaultCPULimit,
-		constants.ConnectionPoolerDefaultCpuLimit)
-
-	result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = util.Coalesce(
-		fromCRD.ConnectionPooler.DefaultMemoryLimit,
-		constants.ConnectionPoolerDefaultMemoryLimit)
+	result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = fromCRD.ConnectionPooler.DefaultCPURequest
+	result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = fromCRD.ConnectionPooler.DefaultMemoryRequest
+	result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = fromCRD.ConnectionPooler.DefaultCPULimit
+	result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = fromCRD.ConnectionPooler.DefaultMemoryLimit
 
 	result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32(
 		fromCRD.ConnectionPooler.MaxDBConnections,
@@ -285,14 +285,18 @@ func (c *Controller) processEvent(event ClusterEvent) {
 			lg.Errorf("unknown cluster: %q", clusterName)
 			return
 		}
-		lg.Infoln("deletion of the cluster started")
 
 		teamName := strings.ToLower(cl.Spec.TeamID)
 
 		c.curWorkerCluster.Store(event.WorkerID, cl)
-		cl.Delete()
-		// Fixme - no error handling for delete ?
-		// c.eventRecorder.Eventf(cl.GetReference, v1.EventTypeWarning, "Delete", "%v", cl.Error)
+		// when using finalizers the deletion already happened
+		if c.opConfig.EnableFinalizers == nil || !*c.opConfig.EnableFinalizers {
+			lg.Infoln("deletion of the cluster started")
+			if err := cl.Delete(); err != nil {
+				cl.Error = fmt.Sprintf("could not delete cluster: %v", err)
+				c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Delete", "%v", cl.Error)
+			}
+		}
 
 		func() {
 			defer c.clustersMu.Unlock()
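For illustration (not taken from the diff): the guard above makes the delete handler call Delete() only when the EnableFinalizers option is unset or false; with finalizers enabled, cleanup is left to the sync path in the next hunk. A minimal sketch of the nil-safe *bool gate, with a hypothetical enableFinalizers argument standing in for c.opConfig.EnableFinalizers:

    package main

    import "fmt"

    // deleteNow reports whether the delete event handler should run Delete()
    // itself: only when finalizers are disabled (nil counts as disabled here,
    // matching the util.False() default seen in the config import above).
    func deleteNow(enableFinalizers *bool) bool {
        return enableFinalizers == nil || !*enableFinalizers
    }

    func main() {
        on, off := true, false
        fmt.Println(deleteNow(nil))  // true  - delete from the event handler
        fmt.Println(deleteNow(&off)) // true  - delete from the event handler
        fmt.Println(deleteNow(&on))  // false - defer to the finalizer-driven sync path
    }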
@@ -325,16 +329,26 @@ func (c *Controller) processEvent(event ClusterEvent) {
 		}
 
 		c.curWorkerCluster.Store(event.WorkerID, cl)
-		err = cl.Sync(event.NewSpec)
-		if err != nil {
-			cl.Error = fmt.Sprintf("could not sync cluster: %v", err)
-			c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error)
-			lg.Error(cl.Error)
-			return
+
+		// has this cluster been marked as deleted already, then we shall start cleaning up
+		if !cl.ObjectMeta.DeletionTimestamp.IsZero() {
+			lg.Infof("cluster has a DeletionTimestamp of %s, starting deletion now.", cl.ObjectMeta.DeletionTimestamp.Format(time.RFC3339))
+			if err = cl.Delete(); err != nil {
+				cl.Error = fmt.Sprintf("error deleting cluster and its resources: %v", err)
+				c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Delete", "%v", cl.Error)
+				lg.Error(cl.Error)
+				return
+			}
+		} else {
+			if err = cl.Sync(event.NewSpec); err != nil {
+				cl.Error = fmt.Sprintf("could not sync cluster: %v", err)
+				c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error)
+				lg.Error(cl.Error)
+				return
+			}
+			lg.Infof("cluster has been synced")
 		}
 		cl.Error = ""
 
-		lg.Infof("cluster has been synced")
 	}
 }
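For illustration (not taken from the diff): with this change a sync event also serves as the cleanup trigger once the Postgresql object carries a deletion timestamp, which is how the finalizer flow hands work back to the operator. A minimal sketch of the branch, relying only on standard metav1.ObjectMeta semantics (DeletionTimestamp is set once deletion has been requested):

    package main

    import (
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // syncOrDelete mirrors the branching added above: clean up when the object
    // is already marked for deletion, otherwise run a normal sync.
    func syncOrDelete(meta metav1.ObjectMeta) string {
        if !meta.DeletionTimestamp.IsZero() {
            return "delete"
        }
        return "sync"
    }

    func main() {
        var live metav1.ObjectMeta
        fmt.Println(syncOrDelete(live)) // "sync"

        now := metav1.NewTime(time.Now())
        marked := metav1.ObjectMeta{DeletionTimestamp: &now}
        fmt.Println(syncOrDelete(marked)) // "delete"
    }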
@@ -347,7 +361,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{},
 	}()
 
 	for {
-		obj, err := c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(func(interface{}) error { return nil }))
+		obj, err := c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(func(interface{}, bool) error { return nil }))
 		if err != nil {
 			if err == cache.ErrFIFOClosed {
 				return
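For illustration (not taken from the diff): the no-op callback above gains a second bool parameter to match the cache.PopProcessFunc signature of the newer client-go vendored with this release; the extra argument is assumed to indicate whether the popped object belongs to the queue's initial list, and the callback's behavior is otherwise unchanged. A minimal sketch of such a no-op process function, assuming a client-go version where PopProcessFunc takes two arguments:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/cache"
    )

    func main() {
        // No-op process callback in the two-argument shape used by the diff;
        // the bool parameter is assumed to flag objects from the initial list.
        noop := cache.PopProcessFunc(func(obj interface{}, isInInitialList bool) error {
            return nil
        })
        fmt.Println(noop != nil) // true
    }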
@@ -560,13 +574,13 @@ func (c *Controller) postgresqlCheck(obj interface{}) *acidv1.Postgresql {
 }
 
 /*
 Ensures the pod service account and role bindings exists in a namespace
 before a PG cluster is created there so that a user does not have to deploy
 these credentials manually. StatefulSets require the service account to
 create pods; Patroni requires relevant RBAC bindings to access endpoints
 or config maps.
 
 The operator does not sync accounts/role bindings after creation.
 */
 func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
@@ -1,5 +1,5 @@
 /*
-Copyright 2023 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
Some files were not shown because too many files have changed in this diff