Compare commits
12 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 137f3e769d | |
| | f05150a81e | |
| | b97de5d7f1 | |
| | ad9ae4ec1b | |
| | 32d6d0a7a7 | |
| | 97115d6e3d | |
| | a585b17796 | |
| | 0a44252534 | |
| | c331fd9434 | |
| | 55cc167fca | |
| | f6839f87b9 | |
| | a06f8d796b | |
@@ -26,7 +26,7 @@ jobs:
           go-version: "^1.25.3"

       - name: Run unit tests
-        run: make deps mocks test
+        run: make test

       - name: Define image name
         id: image
@@ -16,7 +16,7 @@ jobs:
         with:
           go-version: "^1.25.3"
       - name: Make dependencies
-        run: make deps mocks
+        run: make mocks
       - name: Code generation
         run: make codegen
       - name: Run unit tests
@@ -16,7 +16,7 @@ jobs:
         with:
           go-version: "^1.25.3"
       - name: Make dependencies
-        run: make deps mocks
+        run: make mocks
       - name: Compile
        run: make linux
      - name: Run unit tests
@@ -106,3 +106,6 @@ mocks
 ui/.npm/

 .DS_Store
+
+# temp build files
+pkg/apis/acid.zalan.do/v1/postgresql.crd.yaml
Makefile (52 changes)
@@ -13,13 +13,16 @@ LDFLAGS ?= -X=main.version=$(VERSION)
 DOCKERDIR = docker

 BASE_IMAGE ?= alpine:latest
-IMAGE ?= $(BINARY)
+IMAGE ?= ghcr.io/zalando/$(BINARY)
 TAG ?= $(VERSION)
 GITHEAD = $(shell git rev-parse --short HEAD)
 GITURL = $(shell git config --get remote.origin.url)
 GITSTATUS = $(shell git status --porcelain || echo "no changes")
 SOURCES = cmd/main.go
 VERSION ?= $(shell git describe --tags --always --dirty)
+CRD_SOURCES = $(shell find pkg/apis/zalando.org pkg/apis/acid.zalan.do -name '*.go' -not -name '*.deepcopy.go')
+GENERATED_CRDS = manifests/postgresteam.crd.yaml manifests/postgresql.crd.yaml pkg/apis/acid.zalan.do/v1/postgresql.crd.yaml
+GENERATED = pkg/apis/zalando.org/v1/zz_generated.deepcopy.go pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
 DIRS := cmd pkg
 PKG := `go list ./... | grep -v /vendor/`
@@ -51,18 +54,37 @@ default: local

 clean:
     rm -rf build
+    rm $(GENERATED)
+    rm $(GENERATED_CRDS)

-local: ${SOURCES}
+verify:
     hack/verify-codegen.sh
-    CGO_ENABLED=${CGO_ENABLED} go build -o build/${BINARY} $(LOCAL_BUILD_FLAGS) -ldflags "$(LDFLAGS)" $^

-linux: ${SOURCES}
-    GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/linux/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $^
+$(GENERATED): go.mod $(CRD_SOURCES)
+    hack/update-codegen.sh

-macos: ${SOURCES}
-    GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/macos/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $^
+$(GENERATED_CRDS): $(GENERATED)
+    go tool controller-gen crd:crdVersions=v1,allowDangerousTypes=true paths=./pkg/apis/acid.zalan.do/... output:crd:dir=manifests
+    # only generate postgresteam.crd.yaml and postgresql.crd.yaml for now
+    @rm manifests/acid.zalan.do_operatorconfigurations.yaml
+    @mv manifests/acid.zalan.do_postgresqls.yaml manifests/postgresql.crd.yaml
+    @# hack to use lowercase kind and listKind
+    @sed -i -e 's/kind: Postgresql/kind: postgresql/' manifests/postgresql.crd.yaml
+    @sed -i -e 's/listKind: PostgresqlList/listKind: postgresqlList/' manifests/postgresql.crd.yaml
+    @hack/adjust_postgresql_crd.sh
+    @mv manifests/acid.zalan.do_postgresteams.yaml manifests/postgresteam.crd.yaml
+    @cp manifests/postgresql.crd.yaml pkg/apis/acid.zalan.do/v1/postgresql.crd.yaml

-docker: ${DOCKERDIR}/${DOCKERFILE}
+local: ${SOURCES} $(GENERATED_CRDS)
+    CGO_ENABLED=${CGO_ENABLED} go build -o build/${BINARY} $(LOCAL_BUILD_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCES)
+
+linux: ${SOURCES} $(GENERATED_CRDS)
+    GOOS=linux GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/linux/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
+
+macos: ${SOURCES} $(GENERATED_CRDS)
+    GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/macos/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
+
+docker: $(GENERATED_CRDS) ${DOCKERDIR}/${DOCKERFILE}
     echo `(env)`
     echo "Tag ${TAG}"
     echo "Version ${VERSION}"
@@ -76,11 +98,6 @@ indocker-race:
 mocks:
     GO111MODULE=on go generate ./...

-tools:
-    GO111MODULE=on go get k8s.io/client-go@kubernetes-1.32.9
-    GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
-    GO111MODULE=on go mod tidy
-
 fmt:
     @gofmt -l -w -s $(DIRS)
@@ -88,15 +105,10 @@ vet:
     @go vet $(PKG)
     @staticcheck $(PKG)

-deps: tools
-    GO111MODULE=on go mod vendor
-
-test:
-    hack/verify-codegen.sh
+test: mocks $(GENERATED) $(GENERATED_CRDS)
     GO111MODULE=on go test ./...

-codegen:
-    hack/update-codegen.sh
+codegen: $(GENERATED)

 e2e: docker # build operator image to be tested
     cd e2e; make e2etest
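Reviewer note: a minimal sketch of how the reworked target graph above behaves on a checkout of this branch. The only assumptions are the variables defined in this Makefile (`CRD_SOURCES`, `GENERATED`, `GENERATED_CRDS`, `BINARY`); everything else is plain GNU make behaviour.

```bash
# Regenerate deepcopy funcs and the CRD manifests explicitly.
make codegen

# Touching any non-deepcopy API source (what CRD_SOURCES matches) invalidates
# $(GENERATED) and, transitively, $(GENERATED_CRDS), so the next build target
# re-runs hack/update-codegen.sh and controller-gen before compiling.
find pkg/apis -name '*.go' -not -name '*.deepcopy.go' | head -1 | xargs touch
make local    # rebuilds build/$(BINARY) after refreshing the generated files

# Unit tests now pull in mocks and the generated artifacts via prerequisites;
# "make deps" is gone because vendoring is no longer part of the workflow.
make test
```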
@@ -9,4 +9,4 @@ mkdir -p "$team_repo"
 ln -s "$PWD" "$project_dir"
 cd "$project_dir"

-make deps clean docker push
+make clean docker push
@@ -96,6 +96,8 @@ spec:
             default: ""
           ignore_instance_limits_annotation_key:
             type: string
+          ignore_resources_limits_annotation_key:
+            type: string
           kubernetes_use_configmaps:
             type: boolean
             default: false
@@ -493,13 +493,19 @@ spec:
                   type: string
                 standby_port:
                   type: string
-              oneOf:
+                standby_primary_slot_name:
+                  type: string
+              anyOf:
               - required:
                 - s3_wal_path
               - required:
                 - gs_wal_path
               - required:
                 - standby_host
+              not:
+                required:
+                - s3_wal_path
+                - gs_wal_path
           streams:
             type: array
             items:
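One way to see the new `anyOf`/`not` rules in action, assuming the regenerated CRD is installed in a test cluster. The manifest below is illustrative (names, sizes and the exact set of other required spec fields may need adjusting for your setup), and `--dry-run=server` only exercises schema validation.

```bash
# Rejected by the added "not: required: [s3_wal_path, gs_wal_path]" rule:
# both WAL archive locations at once.
kubectl apply --dry-run=server -f - <<'EOF'
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-standby-cluster
spec:
  teamId: acid
  numberOfInstances: 1
  postgresql:
    version: "17"
  volume:
    size: 1Gi
  standby:
    s3_wal_path: s3://mybucket/spilo/acid-minimal-cluster/wal
    gs_wal_path: gs://mybucket/spilo/acid-minimal-cluster/wal
EOF

# Accepted: standby_host alone, or combined with exactly one WAL archive path.
kubectl apply --dry-run=server -f - <<'EOF'
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-standby-cluster
spec:
  teamId: acid
  numberOfInstances: 1
  postgresql:
    version: "17"
  volume:
    size: 1Gi
  standby:
    standby_host: acid-minimal-cluster.default
    s3_wal_path: s3://mybucket/spilo/acid-minimal-cluster/wal
EOF
```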
@@ -43,6 +43,9 @@ configGeneral:
   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""

+  # key name for annotation to ignore globally configured resources thresholds
+  # ignore_resources_limits_annotation_key: ""
+
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
@@ -22,7 +22,7 @@ pipeline:
       commands:
       - desc: Run unit tests
         cmd: |
-          make deps mocks test
+          make mocks test

       - desc: Build Docker image
         cmd: |
@@ -39,10 +39,6 @@ pipeline:
           -f docker/Dockerfile \
           --push .

-          if [ -z ${CDP_SOURCE_BRANCH} ]; then
-            cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION}
-          fi
-
   - id: build-operator-ui
     env:
       <<: *BUILD_ENV
@@ -1346,10 +1346,12 @@ If you are using [additional environment variables](#custom-pod-environment-vari
 to access your backup location you have to copy those variables and prepend
 the `STANDBY_` prefix for Spilo to find the backups and WAL files to stream.

-Alternatively, standby clusters can also stream from a remote primary cluster.
+Standby clusters can also stream from a remote primary cluster.
 You have to specify the host address. Port is optional and defaults to 5432.
-Note, that only one of the options (`s3_wal_path`, `gs_wal_path`,
-`standby_host`) can be present under the `standby` top-level key.
+You can combine `standby_host` with either `s3_wal_path` or `gs_wal_path`
+for additional redundancy. Note that `s3_wal_path` and `gs_wal_path` are
+mutually exclusive. At least one of `s3_wal_path`, `gs_wal_path`, or
+`standby_host` must be specified under the `standby` top-level key.

 ## Logical backups
@@ -33,12 +33,9 @@ by setting the `GO111MODULE` environment variable to `on`. The make targets do
 this for you, so simply run

 ```bash
-make deps
+make
 ```

-This would take a while to complete. You have to redo `make deps` every time
-your dependencies list changes, i.e. after adding a new library dependency.
-
 Build the operator with the `make docker` command. You may define the TAG
 variable to assign an explicit tag to your Docker image and the IMAGE to set
 the image name. By default, the tag is computed with
@@ -223,14 +220,13 @@ dlv connect 127.0.0.1:DLV_PORT
 Prerequisites:

 ```bash
-make deps
 make mocks
 ```

 To run all unit tests, you can simply do:

 ```bash
-go test ./pkg/...
+make test
 ```

 In case if you need to debug your unit test, it's possible to use delve:
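A plausible local loop after these doc changes, assuming a plain checkout of this branch; `<TestName>` below is a placeholder, not a test from this repository.

```bash
# Regenerate the GoMock stubs whenever mocked interfaces change.
make mocks

# Full suite; the test target's prerequisites also refresh the generated
# deepcopy funcs and CRD manifests (see the Makefile change in this diff).
make test

# Narrow the run while iterating on a single test.
go test ./pkg/... -run '<TestName>' -v
```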
@@ -457,22 +457,31 @@ under the `clone` top-level key and do not affect the already running cluster.

 On startup, an existing `standby` top-level key creates a standby Postgres
 cluster streaming from a remote location - either from a S3 or GCS WAL
-archive or a remote primary. Only one of options is allowed and required
-if the `standby` key is present.
+archive, a remote primary, or a combination of both. At least one of
+`s3_wal_path`, `gs_wal_path`, or `standby_host` must be specified.
+Note that `s3_wal_path` and `gs_wal_path` are mutually exclusive.

 * **s3_wal_path**
   the url to S3 bucket containing the WAL archive of the remote primary.
+  Can be combined with `standby_host` for additional redundancy.

 * **gs_wal_path**
   the url to GS bucket containing the WAL archive of the remote primary.
+  Can be combined with `standby_host` for additional redundancy.

 * **standby_host**
   hostname or IP address of the primary to stream from.
+  Can be specified alone or combined with either `s3_wal_path` or `gs_wal_path`.

 * **standby_port**
   TCP port on which the primary is listening for connections. Patroni will
   use `"5432"` if not set.

+* **standby_primary_slot_name**
+  name of the replication slot to use on the primary server when streaming
+  from a remote primary. See the Patroni documentation
+  [here](https://patroni.readthedocs.io/en/latest/standby_cluster.html) for more details. Optional.
+
 ## Volume properties

 Those parameters are grouped under the `volume` top-level key and define the
@@ -163,7 +163,15 @@ Those are top-level keys, containing both leaf keys and groups.
   for some clusters it might be required to scale beyond the limits that can be
   configured with `min_instances` and `max_instances` options. You can define
   an annotation key that can be used as a toggle in cluster manifests to ignore
-  globally configured instance limits. The default is empty.
+  globally configured instance limits. The value must be `"true"` to be
+  effective. The default is empty which means the feature is disabled.
+
+* **ignore_resources_limits_annotation_key**
+  for some clusters it might be required to request resources beyond the globally
+  configured thresholds for maximum requests and minimum limits. You can define
+  an annotation key that can be used as a toggle in cluster manifests to ignore
+  the thresholds. The value must be `"true"` to be effective. The default is empty
+  which means the feature is disabled.

 * **resync_period**
   period between consecutive sync requests. The default is `30m`.
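A hedged illustration of wiring up the two toggles; the annotation key names below are invented for the example. The operator only honours whatever keys are set in `ignore_instance_limits_annotation_key` / `ignore_resources_limits_annotation_key`, and the annotation value has to be the string `"true"`.

```bash
# Operator side (illustrative key names), e.g. in the OperatorConfiguration:
#   ignore_instance_limits_annotation_key: "zalando.org/ignore-instance-limits"
#   ignore_resources_limits_annotation_key: "zalando.org/ignore-resources-limits"

# Cluster side: opt a single postgresql manifest out of the global thresholds
# by setting the matching annotation to "true" in its metadata.
kubectl annotate postgresql acid-minimal-cluster \
  "zalando.org/ignore-resources-limits=true" --overwrite
```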
docs/user.md (15 changes)
@@ -900,8 +900,9 @@ the PostgreSQL version between source and target cluster has to be the same.

 To start a cluster as standby, add the following `standby` section in the YAML
 file. You can stream changes from archived WAL files (AWS S3 or Google Cloud
-Storage) or from a remote primary. Only one option can be specified in the
-manifest:
+Storage), from a remote primary, or combine a remote primary with a WAL archive.
+At least one of `s3_wal_path`, `gs_wal_path`, or `standby_host` must be specified.
+Note that `s3_wal_path` and `gs_wal_path` are mutually exclusive.

 ```yaml
 spec:
@@ -929,6 +930,16 @@ spec:
     standby_port: "5433"
 ```

+You can also combine a remote primary with a WAL archive for additional redundancy:
+
+```yaml
+spec:
+  standby:
+    standby_host: "acid-minimal-cluster.default"
+    standby_port: "5433"
+    s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
+```
+
 Note, that the pods and services use the same role labels like for normal clusters:
 The standby leader is labeled as `master`. When using the `standby_host` option
 you have to copy the credentials from the source cluster's secrets to successfully
e2e/run.sh (16 changes)
@@ -7,7 +7,7 @@ set -o pipefail
 IFS=$'\n\t'

 readonly cluster_name="postgres-operator-e2e-tests"
-readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
+readonly kubeconfig_path="${HOME}/kind-config-${cluster_name}"
 readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
 readonly e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest"
@@ -19,11 +19,17 @@ echo "Kubeconfig path: ${kubeconfig_path}"

 function pull_images(){
   operator_tag=$(git describe --tags --always --dirty)
-  if [[ -z $(docker images -q ghcr.io/zalando/postgres-operator:${operator_tag}) ]]
+  image_name="ghcr.io/zalando/postgres-operator:${operator_tag}"
+  if [[ -z $(docker images -q "${image_name}") ]]
   then
-    docker pull ghcr.io/zalando/postgres-operator:latest
+    if ! docker pull "${image_name}"
+    then
+      echo "Failed to pull operator image: ${image_name}"
+      exit 1
+    fi
   fi
-  operator_image=$(docker images --filter=reference="ghcr.io/zalando/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
+  operator_image="${image_name}"
+  echo "Using operator image: ${operator_image}"
 }

 function start_kind(){
@@ -52,7 +58,7 @@ function set_kind_api_server_ip(){
   # but update the IP address of the API server to the one from the Docker 'bridge' network
   readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
   readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
-  sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
+  sed "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" > "${kubeconfig_path}".tmp && mv "${kubeconfig_path}".tmp "${kubeconfig_path}"
 }

 function generate_certificate(){
go.mod (26 changes)
@@ -15,20 +15,21 @@ require (
 	golang.org/x/crypto v0.45.0
 	gopkg.in/yaml.v2 v2.4.0
 	k8s.io/api v0.32.9
-	k8s.io/apiextensions-apiserver v0.25.9
+	k8s.io/apiextensions-apiserver v0.32.9
 	k8s.io/apimachinery v0.32.9
 	k8s.io/client-go v0.32.9
-	k8s.io/code-generator v0.25.9
 )

 require (
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/fatih/color v1.18.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/gobuffalo/flect v1.0.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
@@ -36,18 +37,22 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/spf13/cobra v1.9.1 // indirect
+	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/mod v0.29.0 // indirect
 	golang.org/x/net v0.47.0 // indirect
@@ -63,13 +68,24 @@ require (
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
-	k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect
+	k8s.io/code-generator v0.32.9 // indirect
+	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	sigs.k8s.io/controller-tools v0.17.3 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
+
+tool (
+	github.com/golang/mock/mockgen
+	k8s.io/code-generator
+	k8s.io/code-generator/cmd/client-gen
+	k8s.io/code-generator/cmd/deepcopy-gen
+	k8s.io/code-generator/cmd/informer-gen
+	k8s.io/code-generator/cmd/lister-gen
+	sigs.k8s.io/controller-tools/cmd/controller-gen
+)
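Background on the new `tool` block (a Go 1.24+ `go.mod` directive) that replaces the blank-import `tools.go` pattern removed later in this diff: a rough sketch of how the pinned tools are invoked and maintained. The controller-gen arguments are the ones the Makefile in this compare uses; the `go get -tool` line is only an example of bumping a tool version.

```bash
# List tools tracked by go.mod (alongside the toolchain's built-in tools).
go tool

# Run a pinned tool; it is built on demand at the version recorded in go.mod/go.sum.
go tool mockgen -help
go tool controller-gen crd:crdVersions=v1,allowDangerousTypes=true \
    paths=./pkg/apis/acid.zalan.do/... output:crd:dir=manifests

# Add or upgrade a tool dependency (updates the tool directive and go.sum).
go get -tool sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.3
```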
go.sum (57 changes)
@@ -4,6 +4,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
 github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -11,9 +12,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -26,6 +30,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
+github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
@@ -34,12 +40,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
 github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
@@ -48,6 +52,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -58,7 +64,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -70,6 +75,11 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
 github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -83,10 +93,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
 github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -96,10 +110,13 @@ github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=
 github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -150,6 +167,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
 golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -163,7 +182,6 @@ golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
 golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@@ -180,13 +198,14 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
 google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
 google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
 gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -196,25 +215,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI=
 k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y=
-k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
-k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
+k8s.io/apiextensions-apiserver v0.32.9 h1:tpT1dUgWqEsTyrdoGckyw8OBASW1JfU08tHGaYBzFHY=
+k8s.io/apiextensions-apiserver v0.32.9/go.mod h1:FoCi4zCLK67LNCCssFa2Wr9q4Xbvjx7MW4tdze5tpoA=
 k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs=
 k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
 k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c=
 k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI=
-k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
-k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
-k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
-k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk=
-k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/code-generator v0.32.9 h1:F9Gti/8I+nVNnQw02J36/YlSD5JMg4qDJ7sfRqpUICU=
+k8s.io/code-generator v0.32.9/go.mod h1:fLYBG9g52EJulRebmomL0vCU0PQeMr7mnscfZtAAGV4=
+k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
+k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw=
+sigs.k8s.io/controller-tools v0.17.3/go.mod h1:1ii+oXcYZkxcBXzwv3YZBlzjt1fvkrCGjVF73blosJI=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
@@ -222,6 +240,5 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
 sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# Hack to adjust the generated postgresql CRD YAML file and add missing field
+# settings which can not be expressed via kubebuilder markers.
+#
+# Injections:
+#
+# * oneOf: for the standby field to enforce validation rules:
+#   - s3_wal_path and gs_wal_path are mutually exclusive
+#   - standby_host can be specified alone or with either s3_wal_path OR gs_wal_path
+#   - at least one of s3_wal_path, gs_wal_path, or standby_host must be set
+# * type: string and pattern for the maintenanceWindows items.
+
+file="${1:-"manifests/postgresql.crd.yaml"}"
+
+sed -i '/^[[:space:]]*standby:$/{
+# Capture the indentation
+s/^\([[:space:]]*\)standby:$/\1standby:\n\1 anyOf:\n\1 - required:\n\1 - s3_wal_path\n\1 - required:\n\1 - gs_wal_path\n\1 - required:\n\1 - standby_host\n\1 not:\n\1 required:\n\1 - s3_wal_path\n\1 - gs_wal_path/
+}' "$file"
+
+sed -i '/^[[:space:]]*maintenanceWindows:$/{
+# Capture the indentation
+s/^\([[:space:]]*\)maintenanceWindows:$/\1maintenanceWindows:\n\1 items:\n\1 pattern: '\''^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$'\''\n\1 type: string/
+}' "$file"
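A small smoke test for the new hack script; it assumes GNU sed (the `sed -i` form used above is GNU-style) and an already generated `manifests/postgresql.crd.yaml` to copy from.

```bash
# Run the adjustment against a scratch copy and inspect the injected blocks.
cp manifests/postgresql.crd.yaml /tmp/postgresql.crd.yaml
hack/adjust_postgresql_crd.sh /tmp/postgresql.crd.yaml

# The standby schema should now carry the anyOf/not rules ...
grep -n -A 12 'standby:' /tmp/postgresql.crd.yaml | head -40
# ... and the maintenanceWindows items should have pattern/type injected.
grep -n -A 3 'maintenanceWindows:' /tmp/postgresql.crd.yaml | head -20
```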
@@ -1,19 +0,0 @@
-// +build tools
-
-/*
-Copyright 2019 The Kubernetes Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This package imports things required by build scripts, to force `go mod` to see them as dependencies
-package tools
-
-import _ "k8s.io/code-generator"
@@ -1,26 +1,67 @@
-#!/usr/bin/env bash
+#!/bin/bash
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 set -o errexit
 set -o nounset
 set -o pipefail
 
-GENERATED_PACKAGE_ROOT="github.com"
-OPERATOR_PACKAGE_ROOT="${GENERATED_PACKAGE_ROOT}/zalando/postgres-operator"
-SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
-TARGET_CODE_DIR=${1-${SCRIPT_ROOT}/pkg}
-CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo "${GOPATH}"/src/k8s.io/code-generator)}
-
-cleanup() {
-    rm -rf "${GENERATED_PACKAGE_ROOT}"
-}
-trap "cleanup" EXIT SIGINT
-
-bash "${CODEGEN_PKG}/generate-groups.sh" client,deepcopy,informer,lister \
-  "${OPERATOR_PACKAGE_ROOT}/pkg/generated" "${OPERATOR_PACKAGE_ROOT}/pkg/apis" \
-  "acid.zalan.do:v1 zalando.org:v1" \
-  --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt \
-  -o ./
-
-cp -r "${OPERATOR_PACKAGE_ROOT}"/pkg/* "${TARGET_CODE_DIR}"
-
-cleanup
+SRC="github.com"
+GOPKG="$SRC/zalando/postgres-operator"
+CUSTOM_RESOURCE_NAME_ZAL="zalando.org"
+CUSTOM_RESOURCE_NAME_ACID="acid.zalan.do"
+CUSTOM_RESOURCE_VERSION="v1"
+
+SCRIPT_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
+
+OUTPUT_DIR="pkg/generated"
+OUTPUT_PKG="${GOPKG}/${OUTPUT_DIR}"
+APIS_PKG="${GOPKG}/pkg/apis"
+GROUPS_WITH_VERSIONS="${CUSTOM_RESOURCE_NAME_ZAL}:${CUSTOM_RESOURCE_VERSION},${CUSTOM_RESOURCE_NAME_ACID}:${CUSTOM_RESOURCE_VERSION}"
+
+echo "Generating deepcopy funcs"
+go tool deepcopy-gen \
+  --output-file zz_generated.deepcopy.go \
+  --bounding-dirs "${APIS_PKG}" \
+  --go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
+
+echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
+go tool client-gen \
+  --clientset-name versioned \
+  --input-base "${APIS_PKG}" \
+  --input "${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION},${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}" \
+  --output-pkg "${OUTPUT_PKG}/clientset" \
+  --go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
+  --output-dir "${OUTPUT_DIR}/clientset"
+
+echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
+go tool lister-gen \
+  --output-pkg "${OUTPUT_PKG}/listers" \
+  --go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
+  --output-dir "${OUTPUT_DIR}/listers" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
+
+echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
+go tool informer-gen \
+  --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \
+  --listers-package "${OUTPUT_PKG}/listers" \
+  --output-pkg "${OUTPUT_PKG}/informers" \
+  --go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" \
+  --output-dir "${OUTPUT_DIR}/informers" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ZAL}/${CUSTOM_RESOURCE_VERSION}" \
+  "${APIS_PKG}/${CUSTOM_RESOURCE_NAME_ACID}/${CUSTOM_RESOURCE_VERSION}"
@@ -75,6 +75,7 @@ data:
   # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
   # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
   # ignore_instance_limits_annotation_key: ""
+  # ignore_resources_limits_annotation_key: ""
   # inherited_annotations: owned-by
   # inherited_labels: application,environment
   # kube_iam_role: ""
@@ -94,6 +94,8 @@ spec:
         default: ""
       ignore_instance_limits_annotation_key:
         type: string
+      ignore_resources_limits_annotation_key:
+        type: string
       kubernetes_use_configmaps:
         type: boolean
         default: false
@@ -14,6 +14,7 @@ configuration:
   enable_team_id_clustername_prefix: false
   etcd_host: ""
   # ignore_instance_limits_annotation_key: ""
+  # ignore_resources_limits_annotation_key: ""
   # kubernetes_use_configmaps: false
   max_instances: -1
   min_instances: -1
(File diff suppressed because it is too large.)
@@ -1,68 +1,82 @@
+---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.3
   name: postgresteams.acid.zalan.do
 spec:
   group: acid.zalan.do
   names:
+    categories:
+    - all
     kind: PostgresTeam
     listKind: PostgresTeamList
     plural: postgresteams
-    singular: postgresteam
     shortNames:
     - pgteam
-    categories:
-    - all
+    singular: postgresteam
   scope: Namespaced
   versions:
   - name: v1
+    schema:
+      openAPIV3Schema:
+        description: PostgresTeam defines Custom Resource Definition Object for team
+          management.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: PostgresTeamSpec defines the specification for the PostgresTeam
+              TPR.
+            properties:
+              additionalMembers:
+                additionalProperties:
+                  description: List of users who will also be added to the Postgres
+                    cluster.
+                  items:
+                    type: string
+                  type: array
+                description: Map for teamId and associated additional users
+                type: object
+              additionalSuperuserTeams:
+                additionalProperties:
+                  description: List of teams to become Postgres superusers
+                  items:
+                    type: string
+                  type: array
+                description: Map for teamId and associated additional superuser teams
+                type: object
+              additionalTeams:
+                additionalProperties:
+                  description: List of teams whose members will also be added to the
+                    Postgres cluster.
+                  items:
+                    type: string
+                  type: array
+                description: Map for teamId and associated additional teams
+                type: object
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
     served: true
     storage: true
     subresources:
       status: {}
-    schema:
-      openAPIV3Schema:
-        type: object
-        required:
-          - kind
-          - apiVersion
-          - spec
-        properties:
-          kind:
-            type: string
-            enum:
-              - PostgresTeam
-          apiVersion:
-            type: string
-            enum:
-              - acid.zalan.do/v1
-          spec:
-            type: object
-            properties:
-              additionalSuperuserTeams:
-                type: object
-                description: "Map for teamId and associated additional superuser teams"
-                additionalProperties:
-                  type: array
-                  nullable: true
-                  description: "List of teams to become Postgres superusers"
-                  items:
-                    type: string
-              additionalTeams:
-                type: object
-                description: "Map for teamId and associated additional teams"
-                additionalProperties:
-                  type: array
-                  nullable: true
-                  description: "List of teams whose members will also be added to the Postgres cluster"
-                  items:
-                    type: string
-              additionalMembers:
-                type: object
-                description: "Map for teamId and associated additional users"
-                additionalProperties:
-                  type: array
-                  nullable: true
-                  description: "List of users who will also be added to the Postgres cluster"
-                  items:
-                    type: string
@@ -9,7 +9,9 @@ spec:
   numberOfInstances: 1
   postgresql:
     version: "17"
-  # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
+  # Make this a standby cluster. You can specify s3_wal_path or gs_wal_path for WAL archive,
+  # standby_host for remote primary streaming, or combine standby_host with either WAL path.
+  # Note: s3_wal_path and gs_wal_path are mutually exclusive.
   standby:
     # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"
     standby_host: "acid-minimal-cluster.default"
(File diff suppressed because it is too large.)
@@ -288,6 +288,8 @@ type OperatorConfigurationData struct {
 	MinInstances                      int32  `json:"min_instances,omitempty"`
 	MaxInstances                      int32  `json:"max_instances,omitempty"`
 	IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
+
+	IgnoreResourcesLimitsAnnotationKey string `json:"ignore_resources_limits_annotation_key,omitempty"`
 }
 
 // Duration shortens this frequently used name
@@ -8,18 +8,33 @@ import (
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // PostgresTeam defines Custom Resource Definition Object for team management.
+// +k8s:deepcopy-gen=true
+// +kubebuilder:resource:shortName=pgteam,categories=all
+// +kubebuilder:subresource:status
 type PostgresTeam struct {
 	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	metav1.ObjectMeta `json:"metadata"`
 
 	Spec PostgresTeamSpec `json:"spec"`
 }
 
+// List of users who will also be added to the Postgres cluster.
+type Users []string
+
+// List of teams whose members will also be added to the Postgres cluster.
+type Teams []string
+
+// List of teams to become Postgres superusers
+type SuperUserTeams []string
+
 // PostgresTeamSpec defines the specification for the PostgresTeam TPR.
 type PostgresTeamSpec struct {
-	AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"`
-	AdditionalTeams          map[string][]string `json:"additionalTeams,omitempty"`
-	AdditionalMembers        map[string][]string `json:"additionalMembers,omitempty"`
+	// Map for teamId and associated additional superuser teams
+	AdditionalSuperuserTeams map[string]SuperUserTeams `json:"additionalSuperuserTeams,omitempty"`
+	// Map for teamId and associated additional teams
+	AdditionalTeams map[string]Teams `json:"additionalTeams,omitempty"`
+	// Map for teamId and associated additional users
+	AdditionalMembers map[string]Users `json:"additionalMembers,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
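With the named Users, Teams and SuperUserTeams slice types above, a PostgresTeamSpec is built the same way as before; only the map value types change. A hedged usage sketch, not part of the diff (team and user names are invented; the import path is the operator's acid.zalan.do/v1 package shown in this diff):

package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
	spec := acidv1.PostgresTeamSpec{
		AdditionalSuperuserTeams: map[string]acidv1.SuperUserTeams{"acid": {"postgres_superusers"}},
		AdditionalTeams:          map[string]acidv1.Teams{"acid": {"24x7"}},
		AdditionalMembers:        map[string]acidv1.Users{"24x7": {"oncall_user"}},
	}

	// The generated DeepCopy still yields an independent copy of the maps and slices.
	copied := spec.DeepCopy()
	copied.AdditionalMembers["24x7"][0] = "someone_else"
	fmt.Println(spec.AdditionalMembers["24x7"][0]) // still "oncall_user"
}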
@@ -11,13 +11,25 @@ import (
 
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
 
 // Postgresql defines PostgreSQL Custom Resource Definition Object.
+// +kubebuilder:resource:categories=all,shortName=pg,scope=Namespaced
+// +kubebuilder:printcolumn:name="Team",type=string,JSONPath=`.spec.teamId`,description="Team responsible for Postgres cluster"
+// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.postgresql.version`,description="PostgreSQL version"
+// +kubebuilder:printcolumn:name="Pods",type=integer,JSONPath=`.spec.numberOfInstances`,description="Number of Pods per Postgres cluster"
+// +kubebuilder:printcolumn:name="Volume",type=string,JSONPath=`.spec.volume.size`,description="Size of the bound volume"
+// +kubebuilder:printcolumn:name="CPU-Request",type=string,JSONPath=`.spec.resources.requests.cpu`,description="Requested CPU for Postgres containers"
+// +kubebuilder:printcolumn:name="Memory-Request",type=string,JSONPath=`.spec.resources.requests.memory`,description="Requested memory for Postgres containers"
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="Age of the PostgreSQL cluster"
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.PostgresClusterStatus`,description="Current sync status of postgresql resource"
+// +kubebuilder:subresource:status
 type Postgresql struct {
 	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	metav1.ObjectMeta `json:"metadata"`
 
 	Spec PostgresSpec `json:"spec"`
+	// +optional
 	Status PostgresStatus `json:"status"`
 	Error  string         `json:"-"`
 }
@@ -25,8 +37,9 @@ type Postgresql struct {
 // PostgresSpec defines the specification for the PostgreSQL TPR.
 type PostgresSpec struct {
 	PostgresqlParam `json:"postgresql"`
-	Volume          `json:"volume,omitempty"`
-	Patroni         `json:"patroni,omitempty"`
+	Volume          `json:"volume"`
+	// +optional
+	Patroni    `json:"patroni"`
 	*Resources `json:"resources,omitempty"`
 
 	EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"`
@@ -53,19 +66,32 @@ type PostgresSpec struct {
 	// deprecated load balancer settings maintained for backward compatibility
 	// see "Load balancers" operator docs
 	UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`
+	// deprecated
 	ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"`
 
 	// load balancers' source ranges are the same for master and replica services
+	// +nullable
+	// +kubebuilder:validation:items:Pattern=`^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$`
+	// +optional
 	AllowedSourceRanges []string `json:"allowedSourceRanges"`
 
 	Users map[string]UserFlags `json:"users,omitempty"`
+	// +nullable
 	UsersIgnoringSecretRotation []string `json:"usersIgnoringSecretRotation,omitempty"`
+	// +nullable
 	UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"`
+	// +nullable
 	UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"`
 
+	// +kubebuilder:validation:Minimum=0
 	NumberOfInstances int32 `json:"numberOfInstances"`
+	// +kubebuilder:validation:Schemaless
+	// +kubebuilder:validation:Type=array
+	// +kubebuilder:validation:items:Type=string
 	MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
 	Clone *CloneDescription `json:"clone,omitempty"`
+	// Note: usernames specified here as database owners must be declared
+	// in the users key of the spec key.
 	Databases         map[string]string            `json:"databases,omitempty"`
 	PreparedDatabases map[string]PreparedDatabase  `json:"preparedDatabases,omitempty"`
 	SchedulerName     *string                      `json:"schedulerName,omitempty"`
@@ -77,6 +103,7 @@ type PostgresSpec struct {
 	ShmVolume              *bool  `json:"enableShmVolume,omitempty"`
 	EnableLogicalBackup    bool   `json:"enableLogicalBackup,omitempty"`
 	LogicalBackupRetention string `json:"logicalBackupRetention,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$`
 	LogicalBackupSchedule string               `json:"logicalBackupSchedule,omitempty"`
 	StandbyCluster        *StandbyDescription  `json:"standby,omitempty"`
 	PodAnnotations        map[string]string    `json:"podAnnotations,omitempty"`
@@ -90,8 +117,9 @@ type PostgresSpec struct {
 	Streams []Stream    `json:"streams,omitempty"`
 	Env     []v1.EnvVar `json:"env,omitempty"`
 
-	// deprecated json tags
+	// deprecated
 	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
+	// deprecated
 	PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"`
 }
 
@@ -123,13 +151,14 @@ type PreparedSchema struct {
 type MaintenanceWindow struct {
 	Everyday  bool         `json:"everyday,omitempty"`
 	Weekday   time.Weekday `json:"weekday,omitempty"`
-	StartTime metav1.Time  `json:"startTime,omitempty"`
-	EndTime   metav1.Time  `json:"endTime,omitempty"`
+	StartTime metav1.Time  `json:"startTime"`
+	EndTime   metav1.Time  `json:"endTime"`
 }
 
 // Volume describes a single volume in the manifest.
 type Volume struct {
 	Selector *metav1.LabelSelector `json:"selector,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
 	Size         string `json:"size"`
 	StorageClass string `json:"storageClass,omitempty"`
 	SubPath      string `json:"subPath,omitempty"`
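The maintenanceWindows items above are validated by the pattern that the CRD-adjustment script earlier in this diff injects into the generated manifest. A small, self-contained Go sketch of that check (the pattern is copied from the script, with the sed-escaped spaces written as plain spaces; the sample values are made up):

package main

import (
	"fmt"
	"regexp"
)

// Pattern taken from the maintenanceWindows injection in the hack script above.
var windowPattern = regexp.MustCompile(`^ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d)) *$`)

func main() {
	fmt.Println(windowPattern.MatchString("Sat:01:00-06:00")) // true: weekly window
	fmt.Println(windowPattern.MatchString("01:00-06:00"))     // true: daily window
	fmt.Println(windowPattern.MatchString("Sat:25:00-06:00")) // false: invalid hour
}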
@@ -145,28 +174,61 @@ type AdditionalVolume struct {
 	MountPath     string `json:"mountPath"`
 	SubPath       string `json:"subPath,omitempty"`
 	IsSubPathExpr *bool  `json:"isSubPathExpr,omitempty"`
+	// +nullable
+	// +optional
 	TargetContainers []string `json:"targetContainers"`
+	// +kubebuilder:validation:XPreserveUnknownFields
+	// +kubebuilder:validation:Type=object
+	// +kubebuilder:validation:Schemaless
 	VolumeSource v1.VolumeSource `json:"volumeSource"`
 }
 
 // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
 type PostgresqlParam struct {
+	// +kubebuilder:validation:Enum="13";"14";"15";"16";"17"
 	PgVersion  string            `json:"version"`
 	Parameters map[string]string `json:"parameters,omitempty"`
 }
 
 // ResourceDescription describes CPU and memory resources defined for a cluster.
 type ResourceDescription struct {
+	// Decimal natural followed by m, or decimal natural followed by
+	// dot followed by up to three decimal digits.
+	//
+	// This is because the Kubernetes CPU resource has millis as the
+	// maximum precision. The actual values are checked in code
+	// because the regular expression would be huge and horrible and
+	// not very helpful in validation error messages; this one checks
+	// only the format of the given number.
+	//
+	// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
+	//
+	// Note: the value specified here must not be zero or be lower
+	// than the corresponding request.
+	// +kubebuilder:validation:Pattern=`^(\d+m|\d+(\.\d{1,3})?)$`
 	CPU *string `json:"cpu,omitempty"`
+	// You can express memory as a plain integer or as a fixed-point
+	// integer using one of these suffixes: E, P, T, G, M, k. You can
+	// also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki
+	//
+	// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory
+	//
+	// Note: the value specified here must not be zero or be higher
+	// than the corresponding limit.
+	// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
 	Memory *string `json:"memory,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
 	HugePages2Mi *string `json:"hugepages-2Mi,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
 	HugePages1Gi *string `json:"hugepages-1Gi,omitempty"`
 }
 
 // Resources describes requests and limits for the cluster resouces.
 type Resources struct {
-	ResourceRequests ResourceDescription `json:"requests,omitempty"`
-	ResourceLimits   ResourceDescription `json:"limits,omitempty"`
+	// +optional
+	ResourceRequests ResourceDescription `json:"requests"`
+	// +optional
+	ResourceLimits ResourceDescription `json:"limits"`
 }
 
 // Patroni contains Patroni-specific configuration
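The CPU and memory patterns added above only gate the format of the strings; as the comments note, the semantic checks (non-zero values, request versus limit ordering) stay in operator code. A standalone sketch of what the format patterns accept and reject (patterns copied from the markers above; the sample values are invented):

package main

import (
	"fmt"
	"regexp"
)

// Patterns copied from the kubebuilder markers in the hunk above.
var (
	cpuPattern    = regexp.MustCompile(`^(\d+m|\d+(\.\d{1,3})?)$`)
	memoryPattern = regexp.MustCompile(`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`)
)

func main() {
	fmt.Println(cpuPattern.MatchString("250m"))     // true: millicores
	fmt.Println(cpuPattern.MatchString("1.5"))      // true: fractional cores
	fmt.Println(cpuPattern.MatchString("1.2345"))   // false: more than three decimal digits
	fmt.Println(memoryPattern.MatchString("500Mi")) // true: power-of-two suffix
	fmt.Println(memoryPattern.MatchString("1.5Gi")) // true
	fmt.Println(memoryPattern.MatchString("1,5Gi")) // false: comma is not accepted
}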
@@ -176,7 +238,7 @@ type Patroni struct {
 	TTL                  uint32 `json:"ttl,omitempty"`
 	LoopWait             uint32 `json:"loop_wait,omitempty"`
 	RetryTimeout         uint32 `json:"retry_timeout,omitempty"`
-	MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
+	MaximumLagOnFailover int64  `json:"maximum_lag_on_failover,omitempty"`
 	Slots                 map[string]map[string]string `json:"slots,omitempty"`
 	SynchronousMode       bool                         `json:"synchronous_mode,omitempty"`
 	SynchronousModeStrict bool                         `json:"synchronous_mode_strict,omitempty"`
@@ -184,16 +246,20 @@ type Patroni struct {
 	FailsafeMode *bool `json:"failsafe_mode,omitempty"`
 }
 
-// StandbyDescription contains remote primary config or s3/gs wal path
+// StandbyDescription contains remote primary config and/or s3/gs wal path.
+// standby_host can be specified alone or together with either s3_wal_path OR gs_wal_path (mutually exclusive).
+// At least one field must be specified. s3_wal_path and gs_wal_path are mutually exclusive.
 type StandbyDescription struct {
 	S3WalPath   string `json:"s3_wal_path,omitempty"`
 	GSWalPath   string `json:"gs_wal_path,omitempty"`
 	StandbyHost string `json:"standby_host,omitempty"`
 	StandbyPort string `json:"standby_port,omitempty"`
+	StandbyPrimarySlotName string `json:"standby_primary_slot_name,omitempty"`
 }
 
 // TLSDescription specs TLS properties
 type TLSDescription struct {
+	// +required
 	SecretName      string `json:"secretName,omitempty"`
 	CertificateFile string `json:"certificateFile,omitempty"`
 	PrivateKeyFile  string `json:"privateKeyFile,omitempty"`
@@ -203,8 +269,14 @@ type TLSDescription struct {
 
 // CloneDescription describes which cluster the new should clone and up to which point in time
 type CloneDescription struct {
+	// +required
 	ClusterName string `json:"cluster,omitempty"`
+	// +kubebuilder:validation:Format=uuid
 	UID string `json:"uid,omitempty"`
+	// The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC
+	// Example: 1996-12-19T16:39:57-08:00
+	// Note: this field requires a timezone
+	// +kubebuilder:validation:Pattern=`^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$`
 	EndTimestamp string `json:"timestamp,omitempty"`
 	S3WalPath    string `json:"s3_wal_path,omitempty"`
 	S3Endpoint   string `json:"s3_endpoint,omitempty"`
@@ -224,6 +296,8 @@ type Sidecar struct {
 }
 
 // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
+// +kubebuilder:validation:items:Enum=bypassrls;BYPASSRLS;nobypassrls;NOBYPASSRLS;createdb;CREATEDB;nocreatedb;NOCREATEDB;createrole;CREATEROLE;nocreaterole;NOCREATEROLE;inherit;INHERIT;noinherit;NOINHERIT;login;LOGIN;nologin;NOLOGIN;replication;REPLICATION;noreplication;NOREPLICATION;superuser;SUPERUSER;nosuperuser;NOSUPERUSER
+// +nullable
 type UserFlags []string
 
 // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.)
@@ -242,9 +316,11 @@ type PostgresStatus struct {
 // makes sense to expose. E.g. pool size (min/max boundaries), max client
 // connections etc.
 type ConnectionPooler struct {
+	// +kubebuilder:validation:Minimum=1
 	NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
 	Schema            string `json:"schema,omitempty"`
 	User              string `json:"user,omitempty"`
+	// +kubebuilder:validation:Enum=session;transaction
 	Mode             string `json:"mode,omitempty"`
 	DockerImage      string `json:"dockerImage,omitempty"`
 	MaxDBConnections *int32 `json:"maxDBConnections,omitempty"`
@@ -259,7 +335,9 @@ type Stream struct {
 	Tables    map[string]StreamTable `json:"tables"`
 	Filter    map[string]*string     `json:"filter,omitempty"`
 	BatchSize *uint32                `json:"batchSize,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+m|\d+(\.\d{1,3})?)$`
 	CPU *string `json:"cpu,omitempty"`
+	// +kubebuilder:validation:Pattern=`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`
 	Memory *string `json:"memory,omitempty"`
 	EnableRecovery *bool `json:"enableRecovery,omitempty"`
 }
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -975,14 +975,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
 	*out = *in
 	if in.AdditionalSuperuserTeams != nil {
 		in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams
-		*out = make(map[string][]string, len(*in))
+		*out = make(map[string]SuperUserTeams, len(*in))
 		for key, val := range *in {
 			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
 				in, out := &val, &outVal
-				*out = make([]string, len(*in))
+				*out = make(SuperUserTeams, len(*in))
 				copy(*out, *in)
 			}
 			(*out)[key] = outVal
@@ -990,14 +990,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
 	}
 	if in.AdditionalTeams != nil {
 		in, out := &in.AdditionalTeams, &out.AdditionalTeams
-		*out = make(map[string][]string, len(*in))
+		*out = make(map[string]Teams, len(*in))
 		for key, val := range *in {
 			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
 				in, out := &val, &outVal
-				*out = make([]string, len(*in))
+				*out = make(Teams, len(*in))
 				copy(*out, *in)
 			}
 			(*out)[key] = outVal
@@ -1005,14 +1005,14 @@ func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
 	}
 	if in.AdditionalMembers != nil {
 		in, out := &in.AdditionalMembers, &out.AdditionalMembers
-		*out = make(map[string][]string, len(*in))
+		*out = make(map[string]Users, len(*in))
 		for key, val := range *in {
 			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
 				in, out := &val, &outVal
-				*out = make([]string, len(*in))
+				*out = make(Users, len(*in))
 				copy(*out, *in)
 			}
 			(*out)[key] = outVal
@@ -1400,6 +1400,26 @@ func (in *StreamTable) DeepCopy() *StreamTable {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in SuperUserTeams) DeepCopyInto(out *SuperUserTeams) {
+	{
+		in := &in
+		*out = make(SuperUserTeams, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuperUserTeams.
+func (in SuperUserTeams) DeepCopy() SuperUserTeams {
+	if in == nil {
+		return nil
+	}
+	out := new(SuperUserTeams)
+	in.DeepCopyInto(out)
+	return *out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TLSDescription) DeepCopyInto(out *TLSDescription) {
 	*out = *in
@@ -1416,6 +1436,26 @@ func (in *TLSDescription) DeepCopy() *TLSDescription {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Teams) DeepCopyInto(out *Teams) {
+	{
+		in := &in
+		*out = make(Teams, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Teams.
+func (in Teams) DeepCopy() Teams {
+	if in == nil {
+		return nil
+	}
+	out := new(Teams)
+	in.DeepCopyInto(out)
+	return *out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
 	*out = *in
@@ -1469,6 +1509,26 @@ func (in UserFlags) DeepCopy() UserFlags {
 	return *out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Users) DeepCopyInto(out *Users) {
+	{
+		in := &in
+		*out = make(Users, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Users.
+func (in Users) DeepCopy() Users {
+	if in == nil {
+		return nil
+	}
+	out := new(Users)
+	in.DeepCopyInto(out)
+	return *out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Volume) DeepCopyInto(out *Volume) {
 	*out = *in
@@ -9,14 +9,16 @@ import (
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // FabricEventStream defines FabricEventStream Custom Resource Definition Object.
+// +k8s:deepcopy-gen=true
 type FabricEventStream struct {
 	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	metav1.ObjectMeta `json:"metadata"`
 
 	Spec FabricEventStreamSpec `json:"spec"`
 }
 
 // FabricEventStreamSpec defines the specification for the FabricEventStream TPR.
+// +k8s:deepcopy-gen=true
 type FabricEventStreamSpec struct {
 	ApplicationId string        `json:"applicationId"`
 	EventStreams  []EventStream `json:"eventStreams"`
@@ -25,6 +27,7 @@ type FabricEventStreamSpec struct {
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // FabricEventStreamList defines a list of FabricEventStreams .
+// +k8s:deepcopy-gen=true
 type FabricEventStreamList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata"`
@@ -33,6 +36,7 @@ type FabricEventStreamList struct {
 }
 
 // EventStream defines the source, flow and sink of the event stream
+// +k8s:deepcopy-gen=true
 type EventStream struct {
 	EventStreamFlow EventStreamFlow `json:"flow"`
 	EventStreamSink EventStreamSink `json:"sink"`
@@ -41,12 +45,14 @@ type EventStream struct {
 }
 
 // EventStreamFlow defines the flow characteristics of the event stream
+// +k8s:deepcopy-gen=true
 type EventStreamFlow struct {
 	Type          string  `json:"type"`
 	PayloadColumn *string `json:"payloadColumn,omitempty"`
 }
 
 // EventStreamSink defines the target of the event stream
+// +k8s:deepcopy-gen=true
 type EventStreamSink struct {
 	Type      string `json:"type"`
 	EventType string `json:"eventType,omitempty"`
@@ -54,12 +60,14 @@ type EventStreamSink struct {
 }
 
 // EventStreamRecovery defines the target of dead letter queue
+// +k8s:deepcopy-gen=true
 type EventStreamRecovery struct {
 	Type string           `json:"type"`
 	Sink *EventStreamSink `json:"sink"`
 }
 
 // EventStreamSource defines the source of the event stream and connection for FES operator
+// +k8s:deepcopy-gen=true
 type EventStreamSource struct {
 	Type   string `json:"type"`
 	Schema string `json:"schema,omitempty" defaults:"public"`
@@ -69,12 +77,14 @@ type EventStreamSource struct {
 }
 
 // EventStreamTable defines the name and ID column to be used for streaming
+// +k8s:deepcopy-gen=true
 type EventStreamTable struct {
 	Name     string  `json:"name"`
 	IDColumn *string `json:"idColumn,omitempty"`
 }
 
 // Connection to be used for allowing the FES operator to connect to a database
+// +k8s:deepcopy-gen=true
 type Connection struct {
 	Url      string `json:"jdbcUrl"`
 	SlotName string `json:"slotName"`
@@ -84,6 +94,7 @@ type Connection struct {
 }
 
 // DBAuth specifies the credentials to be used for connecting with the database
+// +k8s:deepcopy-gen=true
 type DBAuth struct {
 	Type string `json:"type"`
 	Name string `json:"name,omitempty"`
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
-Copyright 2021 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -38,11 +39,11 @@ func (in *Connection) DeepCopyInto(out *Connection) {
 		*out = new(string)
 		**out = **in
 	}
-	in.DBAuth.DeepCopyInto(&out.DBAuth)
+	out.DBAuth = in.DBAuth
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
 func (in *Connection) DeepCopy() *Connection {
 	if in == nil {
 		return nil
@@ -58,7 +59,7 @@ func (in *DBAuth) DeepCopyInto(out *DBAuth) {
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBAuth.
 func (in *DBAuth) DeepCopy() *DBAuth {
 	if in == nil {
 		return nil
@@ -72,13 +73,13 @@ func (in *DBAuth) DeepCopy() *DBAuth {
 func (in *EventStream) DeepCopyInto(out *EventStream) {
 	*out = *in
 	in.EventStreamFlow.DeepCopyInto(&out.EventStreamFlow)
-	in.EventStreamRecovery.DeepCopyInto(&out.EventStreamRecovery)
 	in.EventStreamSink.DeepCopyInto(&out.EventStreamSink)
 	in.EventStreamSource.DeepCopyInto(&out.EventStreamSource)
+	in.EventStreamRecovery.DeepCopyInto(&out.EventStreamRecovery)
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStream.
 func (in *EventStream) DeepCopy() *EventStream {
 	if in == nil {
 		return nil
@@ -99,7 +100,7 @@ func (in *EventStreamFlow) DeepCopyInto(out *EventStreamFlow) {
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamFlow.
 func (in *EventStreamFlow) DeepCopy() *EventStreamFlow {
 	if in == nil {
 		return nil
@@ -120,7 +121,7 @@ func (in *EventStreamRecovery) DeepCopyInto(out *EventStreamRecovery) {
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamRecovery.
 func (in *EventStreamRecovery) DeepCopy() *EventStreamRecovery {
 	if in == nil {
 		return nil
@@ -141,7 +142,7 @@ func (in *EventStreamSink) DeepCopyInto(out *EventStreamSink) {
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamSink.
 func (in *EventStreamSink) DeepCopy() *EventStreamSink {
 	if in == nil {
 		return nil
@@ -154,17 +155,17 @@ func (in *EventStreamSink) DeepCopy() *EventStreamSink {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventStreamSource) DeepCopyInto(out *EventStreamSource) {
 	*out = *in
-	in.Connection.DeepCopyInto(&out.Connection)
+	in.EventStreamTable.DeepCopyInto(&out.EventStreamTable)
 	if in.Filter != nil {
 		in, out := &in.Filter, &out.Filter
 		*out = new(string)
 		**out = **in
 	}
-	in.EventStreamTable.DeepCopyInto(&out.EventStreamTable)
+	in.Connection.DeepCopyInto(&out.Connection)
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamSource.
 func (in *EventStreamSource) DeepCopy() *EventStreamSource {
 	if in == nil {
 		return nil
@@ -185,7 +186,7 @@ func (in *EventStreamTable) DeepCopyInto(out *EventStreamTable) {
 	return
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventStreamTable.
 func (in *EventStreamTable) DeepCopy() *EventStreamTable {
 	if in == nil {
 		return nil
@@ -195,30 +196,6 @@ func (in *EventStreamTable) DeepCopy() *EventStreamTable {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FabricEventStreamSpec) DeepCopyInto(out *FabricEventStreamSpec) {
-	*out = *in
-	if in.EventStreams != nil {
-		in, out := &in.EventStreams, &out.EventStreams
-		*out = make([]EventStream, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricEventStreamSpec.
-func (in *FabricEventStreamSpec) DeepCopy() *FabricEventStreamSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(FabricEventStreamSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *FabricEventStream) DeepCopyInto(out *FabricEventStream) {
 	*out = *in
@@ -278,3 +255,26 @@ func (in *FabricEventStreamList) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FabricEventStreamSpec) DeepCopyInto(out *FabricEventStreamSpec) {
+	*out = *in
+	if in.EventStreams != nil {
+		in, out := &in.EventStreams, &out.EventStreams
+		*out = make([]EventStream, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FabricEventStreamSpec.
+func (in *FabricEventStreamSpec) DeepCopy() *FabricEventStreamSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(FabricEventStreamSpec)
+	in.DeepCopyInto(out)
+	return out
+}
@@ -32,6 +32,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/rest"

@@ -271,25 +272,29 @@ func (c *Cluster) Create() (err error) {
 	)
 
 	defer func() {
-		var (
-			pgUpdatedStatus *acidv1.Postgresql
-			errStatus       error
-		)
-		if err == nil {
-			pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
-		} else {
+		currentStatus := c.Status.DeepCopy()
+		pg := c.Postgresql.DeepCopy()
+		pg.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
+		if err != nil {
 			c.logger.Warningf("cluster created failed: %v", err)
-			pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
+			pg.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
 		}
-		if errStatus != nil {
-			c.logger.Warningf("could not set cluster status: %v", errStatus)
-		}
-		if pgUpdatedStatus != nil {
+		if !equality.Semantic.DeepEqual(currentStatus, pg.Status) {
+			pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
+			if err != nil {
+				c.logger.Warningf("could not set cluster status: %v", err)
+				return
+			}
 			c.setSpec(pgUpdatedStatus)
 		}
 	}()
 
-	pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
+	pg := c.Postgresql.DeepCopy()
+	pg.Status.PostgresClusterStatus = acidv1.ClusterStatusCreating
+
+	pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
 	if err != nil {
 		return fmt.Errorf("could not set cluster status: %v", err)
 	}

@@ -977,7 +982,12 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
-	c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)
+	newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdating
+
+	newSpec, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
+	if err != nil {
+		return fmt.Errorf("could not set cluster status to updating: %w", err)
+	}
+
 	if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
 		// do not apply any major version related changes yet

@@ -986,19 +996,19 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	c.setSpec(newSpec)
 
 	defer func() {
-		var (
-			pgUpdatedStatus *acidv1.Postgresql
-			err             error
-		)
+		currentStatus := newSpec.Status.DeepCopy()
+		newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
 		if updateFailed {
-			pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
-		} else {
-			pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+			newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
 		}
-		if err != nil {
-			c.logger.Warningf("could not set cluster status: %v", err)
-		}
-		if pgUpdatedStatus != nil {
+
+		if !equality.Semantic.DeepEqual(currentStatus, newSpec.Status) {
+			pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
+			if err != nil {
+				c.logger.Warningf("could not set cluster status: %v", err)
+				return
+			}
 			c.setSpec(pgUpdatedStatus)
 		}
 	}()
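The Create() and Update() hunks above both move to the same pattern: take a copy of the current status, mutate the copy, and only call SetPostgresCRDStatus when the status actually changed. The following minimal, self-contained Go sketch illustrates that pattern; the PostgresStatus/fakeClient/SetStatus/finishCreate names are made up for the example, and reflect.DeepEqual stands in for apimachinery's equality.Semantic.DeepEqual used in the real code.

// Hypothetical sketch of the "write status only on change" pattern, not the operator's real API.
package main

import (
	"fmt"
	"reflect"
)

// PostgresStatus is a stand-in for the cluster's status subresource.
type PostgresStatus struct {
	PostgresClusterStatus string
}

// fakeClient is a stand-in for the operator's KubeClient; it only counts status writes.
type fakeClient struct{ calls int }

func (c *fakeClient) SetStatus(name string, status PostgresStatus) PostgresStatus {
	c.calls++
	return status
}

// finishCreate mirrors the deferred block in Create(): pick Running or AddFailed,
// then update the CRD status only if it differs from the current one.
func finishCreate(client *fakeClient, name string, current PostgresStatus, createErr error) PostgresStatus {
	desired := current // value copy; stands in for DeepCopy of the status
	desired.PostgresClusterStatus = "Running"
	if createErr != nil {
		desired.PostgresClusterStatus = "AddFailed"
	}
	if !reflect.DeepEqual(current, desired) {
		return client.SetStatus(name, desired) // single status write, only on change
	}
	return current
}

func main() {
	client := &fakeClient{}
	status := PostgresStatus{PostgresClusterStatus: "Creating"}
	status = finishCreate(client, "acid-minimal-cluster", status, nil)
	fmt.Println(status.PostgresClusterStatus, "status writes:", client.calls) // Running status writes: 1

	// A second pass with an unchanged status performs no extra write.
	status = finishCreate(client, "acid-minimal-cluster", status, nil)
	fmt.Println(status.PostgresClusterStatus, "status writes:", client.calls) // Running status writes: 1
}

Writing the status only when it changed keeps the deferred blocks from issuing a redundant API call on every pass.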
@@ -313,6 +313,14 @@ func (c *Cluster) generateResourceRequirements(
 	specLimits := acidv1.ResourceDescription{}
 	result := v1.ResourceRequirements{}
 
+	enforceThresholds := true
+	resourcesLimitAnnotationKey := c.OpConfig.IgnoreResourcesLimitsAnnotationKey
+	if resourcesLimitAnnotationKey != "" {
+		if value, exists := c.ObjectMeta.Annotations[resourcesLimitAnnotationKey]; exists && value == "true" {
+			enforceThresholds = false
+		}
+	}
+
 	if resources != nil {
 		specRequests = resources.ResourceRequests
 		specLimits = resources.ResourceLimits

@@ -329,7 +337,7 @@ func (c *Cluster) generateResourceRequirements(
 	}
 
 	// enforce minimum cpu and memory limits for Postgres containers only
-	if containerName == constants.PostgresContainerName {
+	if containerName == constants.PostgresContainerName && enforceThresholds {
 		if err = c.enforceMinResourceLimits(&result); err != nil {
 			return nil, fmt.Errorf("could not enforce minimum resource limits: %v", err)
 		}

@@ -344,7 +352,7 @@ func (c *Cluster) generateResourceRequirements(
 	}
 
 	// enforce maximum cpu and memory requests for Postgres containers only
-	if containerName == constants.PostgresContainerName {
+	if containerName == constants.PostgresContainerName && enforceThresholds {
 		if err = c.enforceMaxResourceRequests(&result); err != nil {
 			return nil, fmt.Errorf("could not enforce maximum resource requests: %v", err)
 		}

@@ -412,7 +420,7 @@ PatroniInitDBParams:
 	}
 
 	if patroni.MaximumLagOnFailover >= 0 {
-		config.Bootstrap.DCS.MaximumLagOnFailover = patroni.MaximumLagOnFailover
+		config.Bootstrap.DCS.MaximumLagOnFailover = float32(patroni.MaximumLagOnFailover)
 	}
 	if patroni.LoopWait != 0 {
 		config.Bootstrap.DCS.LoopWait = patroni.LoopWait

@@ -2199,23 +2207,29 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
 				Value: description.StandbyPort,
 			})
 		}
-	} else {
-		c.logger.Info("standby cluster streaming from WAL location")
+		if description.StandbyPrimarySlotName != "" {
+			result = append(result, v1.EnvVar{
+				Name:  "STANDBY_PRIMARY_SLOT_NAME",
+				Value: description.StandbyPrimarySlotName,
+			})
+		}
+	}
+
+	// WAL archive can be specified with or without standby_host
 	if description.S3WalPath != "" {
+		c.logger.Info("standby cluster using S3 WAL archive")
 		result = append(result, v1.EnvVar{
 			Name:  "STANDBY_WALE_S3_PREFIX",
 			Value: description.S3WalPath,
 		})
+		result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+		result = append(result, v1.EnvVar{Name: "STANDBY_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
 	} else if description.GSWalPath != "" {
+		c.logger.Info("standby cluster using GCS WAL archive")
 		result = append(result, v1.EnvVar{
 			Name:  "STANDBY_WALE_GS_PREFIX",
 			Value: description.GSWalPath,
 		})
-	} else {
-		c.logger.Error("no WAL path specified in standby section")
-		return result
-	}
 
 		result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
 		result = append(result, v1.EnvVar{Name: "STANDBY_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
 	}
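The generateResourceRequirements() hunks above gate the min/max threshold enforcement behind a new IgnoreResourcesLimitsAnnotationKey option: when the configured annotation is set to "true" on the cluster, enforceMinResourceLimits and enforceMaxResourceRequests are skipped for the Postgres container. Below is a small, self-contained sketch of that gate; the shouldEnforceThresholds helper and the postgresContainerName constant are made up for the example, while the annotation key mirrors the one used in the tests further down.

// Hypothetical sketch of the annotation gate; not the operator's real function signature.
package main

import "fmt"

const postgresContainerName = "postgres" // stand-in for constants.PostgresContainerName

// shouldEnforceThresholds reproduces the enforceThresholds logic: thresholds apply
// only to the Postgres container and only when the cluster has not opted out.
func shouldEnforceThresholds(containerName, annotationKey string, annotations map[string]string) bool {
	if containerName != postgresContainerName {
		return false // thresholds are never applied to sidecars
	}
	if annotationKey == "" {
		return true // feature not configured: keep enforcing
	}
	if value, exists := annotations[annotationKey]; exists && value == "true" {
		return false // cluster opted out via annotation
	}
	return true
}

func main() {
	key := "zalando.org/ignore-resources-limits"
	fmt.Println(shouldEnforceThresholds("postgres", key, map[string]string{key: "true"}))  // false
	fmt.Println(shouldEnforceThresholds("postgres", key, map[string]string{key: "false"})) // true
	fmt.Println(shouldEnforceThresholds("sidecar", key, nil))                              // false
}

Only the literal value "true" opts a cluster out, which is consistent with the test case below that annotates a cluster with "yes" and still expects the thresholds to be enforced.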
@@ -1370,7 +1370,33 @@ func TestStandbyEnv(t *testing.T) {
 			envLen: 2,
 		},
 		{
-			subTest: "from remote primary - ignore WAL path",
+			subTest: "from remote primary with S3 WAL path",
+			standbyOpts: &acidv1.StandbyDescription{
+				S3WalPath:   "s3://some/path/",
+				StandbyHost: "remote-primary",
+			},
+			env: v1.EnvVar{
+				Name:  "STANDBY_HOST",
+				Value: "remote-primary",
+			},
+			envPos: 0,
+			envLen: 4,
+		},
+		{
+			subTest: "verify S3 WAL env with standby host",
+			standbyOpts: &acidv1.StandbyDescription{
+				S3WalPath:   "s3://some/path/",
+				StandbyHost: "remote-primary",
+			},
+			env: v1.EnvVar{
+				Name:  "STANDBY_WALE_S3_PREFIX",
+				Value: "s3://some/path/",
+			},
+			envPos: 1,
+			envLen: 4,
+		},
+		{
+			subTest: "from remote primary with GCS WAL path",
 			standbyOpts: &acidv1.StandbyDescription{
 				GSWalPath:   "gs://some/path/",
 				StandbyHost: "remote-primary",

@@ -1380,7 +1406,20 @@ func TestStandbyEnv(t *testing.T) {
 				Value: "remote-primary",
 			},
 			envPos: 0,
-			envLen: 1,
+			envLen: 4,
+		},
+		{
+			subTest: "from remote primary with slot name",
+			standbyOpts: &acidv1.StandbyDescription{
+				StandbyHost:            "remote-primary",
+				StandbyPrimarySlotName: "my_slot",
+			},
+			env: v1.EnvVar{
+				Name:  "STANDBY_PRIMARY_SLOT_NAME",
+				Value: "my_slot",
+			},
+			envPos: 1,
+			envLen: 2,
 		},
 	}

@@ -3130,6 +3169,9 @@ func TestGenerateResourceRequirements(t *testing.T) {
 		PodRoleLabel: "spilo-role",
 	}
 
+	configWithEnabledIgnoreResourcesLimits := configResources
+	configWithEnabledIgnoreResourcesLimits.IgnoreResourcesLimitsAnnotationKey = "zalando.org/ignore-resources-limits"
+
 	tests := []struct {
 		subTest string
 		config  config.Config

@@ -3465,7 +3507,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
 		{
 			subTest: "test enforcing min cpu and memory limit",
 			config: config.Config{
-				Resources:               configResources,
+				Resources:               configWithEnabledIgnoreResourcesLimits,
 				PodManagementPolicy:     "ordered_ready",
 				SetMemoryRequestToLimit: false,
 			},

@@ -3473,6 +3515,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name:        clusterName,
 					Namespace:   namespace,
+					Annotations: map[string]string{"zalando.org/ignore-resources-limits": "false"},
 				},
 				Spec: acidv1.PostgresSpec{
 					Resources: &acidv1.Resources{

@@ -3490,6 +3533,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
 				ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("250Mi")},
 			},
 		},
+		{
+			subTest: "ingnore min cpu and memory limit threshold",
+			config: config.Config{
+				Resources:               configWithEnabledIgnoreResourcesLimits,
+				PodManagementPolicy:     "ordered_ready",
+				SetMemoryRequestToLimit: false,
+			},
+			pgSpec: acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        clusterName,
+					Namespace:   namespace,
+					Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
+				},
+				Spec: acidv1.PostgresSpec{
+					Resources: &acidv1.Resources{
+						ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
+						ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
+					},
+					TeamID: "acid",
+					Volume: acidv1.Volume{
+						Size: "1G",
+					},
+				},
+			},
+			expectedResources: acidv1.Resources{
+				ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
+				ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")},
+			},
+		},
 		{
 			subTest: "test min cpu and memory limit are not enforced on sidecar",
 			config: config.Config{

@@ -3527,7 +3599,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
 		{
 			subTest: "test enforcing max cpu and memory requests",
 			config: config.Config{
-				Resources:               configResources,
+				Resources:               configWithEnabledIgnoreResourcesLimits,
 				PodManagementPolicy:     "ordered_ready",
 				SetMemoryRequestToLimit: false,
 			},

@@ -3535,6 +3607,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name:        clusterName,
 					Namespace:   namespace,
+					Annotations: map[string]string{"zalando.org/ignore-resources-limits": "yes"},
 				},
 				Spec: acidv1.PostgresSpec{
 					Resources: &acidv1.Resources{

@@ -3552,6 +3625,35 @@ func TestGenerateResourceRequirements(t *testing.T) {
 				ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
 			},
 		},
+		{
+			subTest: "ignore max cpu and memory requests limit",
+			config: config.Config{
+				Resources:               configWithEnabledIgnoreResourcesLimits,
+				PodManagementPolicy:     "ordered_ready",
+				SetMemoryRequestToLimit: false,
+			},
+			pgSpec: acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        clusterName,
+					Namespace:   namespace,
+					Annotations: map[string]string{"zalando.org/ignore-resources-limits": "true"},
+				},
+				Spec: acidv1.PostgresSpec{
+					Resources: &acidv1.Resources{
+						ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
+						ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
+					},
+					TeamID: "acid",
+					Volume: acidv1.Volume{
+						Size: "1G",
+					},
+				},
+			},
+			expectedResources: acidv1.Resources{
+				ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")},
+				ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")},
+			},
+		},
 		{
 			subTest: "test SetMemoryRequestToLimit flag but raise only until max memory request",
 			config: config.Config{

@@ -20,6 +20,7 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )

@@ -43,20 +44,19 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	c.setSpec(newSpec)
 
 	defer func() {
-		var (
-			pgUpdatedStatus *acidv1.Postgresql
-			errStatus       error
-		)
 		if err != nil {
 			c.logger.Warningf("error while syncing cluster state: %v", err)
-			pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed)
+			newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
 		} else if !c.Status.Running() {
-			pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
+			newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
 		}
-		if errStatus != nil {
-			c.logger.Warningf("could not set cluster status: %v", errStatus)
-		}
-		if pgUpdatedStatus != nil {
+
+		if !equality.Semantic.DeepEqual(oldSpec.Status, newSpec.Status) {
+			pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
+			if err != nil {
+				c.logger.Warningf("could not set cluster status: %v", err)
+				return
+			}
 			c.setSpec(pgUpdatedStatus)
 		}
 	}()

@@ -1030,6 +1030,23 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
 		standbyOptionsToSet["create_replica_methods"] = []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"}
 		standbyOptionsToSet["restore_command"] = "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""
+
+		if c.Spec.StandbyCluster.StandbyHost != "" {
+			standbyOptionsToSet["host"] = c.Spec.StandbyCluster.StandbyHost
+		} else {
+			standbyOptionsToSet["host"] = nil
+		}
+
+		if c.Spec.StandbyCluster.StandbyPort != "" {
+			standbyOptionsToSet["port"] = c.Spec.StandbyCluster.StandbyPort
+		} else {
+			standbyOptionsToSet["port"] = nil
+		}
+
+		if c.Spec.StandbyCluster.StandbyPrimarySlotName != "" {
+			standbyOptionsToSet["primary_slot_name"] = c.Spec.StandbyCluster.StandbyPrimarySlotName
+		} else {
+			standbyOptionsToSet["primary_slot_name"] = nil
+		}
 	} else {
 		c.logger.Infof("promoting standby cluster and detach from source")
 		standbyOptionsToSet = nil

@@ -801,6 +801,41 @@ func TestSyncStandbyClusterConfiguration(t *testing.T) {
 	// this should update the Patroni config again
 	err = cluster.syncStandbyClusterConfiguration()
 	assert.NoError(t, err)
+
+	// test with standby_host, standby_port and standby_primary_slot_name
+	cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
+		StandbyHost:            "remote-primary.example.com",
+		StandbyPort:            "5433",
+		StandbyPrimarySlotName: "standby_slot",
+	}
+	cluster.syncStatefulSet()
+	updatedSts4 := cluster.Statefulset
+
+	// check that pods have all three STANDBY_* environment variables
+	assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_HOST", Value: "remote-primary.example.com"})
+	assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PORT", Value: "5433"})
+	assert.Contains(t, updatedSts4.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PRIMARY_SLOT_NAME", Value: "standby_slot"})
+
+	// this should update the Patroni config with host, port and primary_slot_name
+	err = cluster.syncStandbyClusterConfiguration()
+	assert.NoError(t, err)
+
+	// test property deletion: remove standby_primary_slot_name
+	cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
+		StandbyHost: "remote-primary.example.com",
+		StandbyPort: "5433",
+	}
+	cluster.syncStatefulSet()
+	updatedSts5 := cluster.Statefulset
+
+	// check that STANDBY_PRIMARY_SLOT_NAME is not present
+	assert.Contains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_HOST", Value: "remote-primary.example.com"})
+	assert.Contains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PORT", Value: "5433"})
+	assert.NotContains(t, updatedSts5.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_PRIMARY_SLOT_NAME", Value: "standby_slot"})
+
+	// this should update the Patroni config and set primary_slot_name to nil
+	err = cluster.syncStandbyClusterConfiguration()
+	assert.NoError(t, err)
 }
 
 func TestUpdateSecret(t *testing.T) {

@@ -288,6 +288,12 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 		},
 	}
 
+	// add postgresql cluster to fake client
+	_, err := client.PostgresqlsGetter.Postgresqls(namespace).Create(context.TODO(), &pg, metav1.CreateOptions{})
+	if err != nil {
+		return nil, err
+	}
+
 	cluster := New(
 		Config{
 			OpConfig: config.Config{

@@ -321,7 +327,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 		}, client, pg, logger, eventRecorder)
 	cluster.Name = clusterName
 	cluster.Namespace = namespace
-	_, err := cluster.createStatefulSet()
+	_, err = cluster.createStatefulSet()
 	if err != nil {
 		return nil, err
 	}

@@ -44,6 +44,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
 	result.IgnoreInstanceLimitsAnnotationKey = fromCRD.IgnoreInstanceLimitsAnnotationKey
+	result.IgnoreResourcesLimitsAnnotationKey = fromCRD.IgnoreResourcesLimitsAnnotationKey
 	result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m")
 	result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m")
 	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit

@@ -161,7 +161,8 @@ func (c *Controller) acquireInitialListOfClusters() error {
 func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
 	if c.opConfig.EnableTeamIdClusternamePrefix {
 		if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
-			c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid)
+			pgSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusInvalid
+			c.KubeClient.SetPostgresCRDStatus(clusterName, pgSpec)
 			return nil, err
 		}
 	}

@@ -470,13 +471,25 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
 
 	switch eventType {
 	case EventAdd:
-		c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed)
+		informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
+		_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
+		if err != nil {
+			c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
+		}
 		c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError)
 	case EventUpdate:
-		c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed)
+		informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
+		_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
+		if err != nil {
+			c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
+		}
 		c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError)
 	default:
-		c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed)
+		informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
+		_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
+		if err != nil {
+			c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
+		}
 		c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError)
 	}

@@ -2,14 +2,12 @@ package controller
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"

@@ -65,15 +63,9 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
 	}
 	if crd != nil {
 		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
-		// copy annotations and labels from existing CRD since we do not define them
-		desiredCrd.Annotations = crd.Annotations
-		desiredCrd.Labels = crd.Labels
-		patch, err := json.Marshal(desiredCrd)
+		crd.Spec = desiredCrd.Spec
+		_, err := c.KubeClient.CustomResourceDefinitions().Update(context.TODO(), crd, metav1.UpdateOptions{})
 		if err != nil {
-			return fmt.Errorf("could not marshal new customResourceDefintion %q: %v", desiredCrd.Name, err)
-		}
-		if _, err := c.KubeClient.CustomResourceDefinitions().Patch(
-			context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
 			return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err)
 		}
 	}

@@ -103,7 +95,11 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
 }
 
 func (c *Controller) createPostgresCRD() error {
-	return c.createOperatorCRD(acidv1.PostgresCRD(c.opConfig.CRDCategories))
+	crd, err := acidv1.PostgresCRD(c.opConfig.CRDCategories)
+	if err != nil {
+		return fmt.Errorf("could not create Postgres CRD object: %v", err)
+	}
+	return c.createOperatorCRD(crd)
 }
 
 func (c *Controller) createConfigurationCRD() error {

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -25,8 +25,8 @@ SOFTWARE.
 package versioned
 
 import (
-	"fmt"
-	"net/http"
+	fmt "fmt"
+	http "net/http"
 
 	acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
 	zalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/zalando.org/v1"

@@ -41,8 +41,7 @@ type Interface interface {
 	ZalandoV1() zalandov1.ZalandoV1Interface
 }
 
-// Clientset contains the clients for groups. Each group has exactly one
-// version included in a Clientset.
+// Clientset contains the clients for groups.
 type Clientset struct {
 	*discovery.DiscoveryClient
 	acidV1 *acidv1.AcidV1Client

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -39,8 +39,12 @@ import (
 
 // NewSimpleClientset returns a clientset that will respond with the provided objects.
 // It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
-// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
 // for a real clientset and is mostly useful in simple unit tests.
+//
+// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
+// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
+// via --with-applyconfig).
 func NewSimpleClientset(objects ...runtime.Object) *Clientset {
 	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
 	for _, obj := range objects {

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -25,10 +25,10 @@ SOFTWARE.
 package v1
 
 import (
-	"net/http"
+	http "net/http"
 
-	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-	"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	rest "k8s.io/client-go/rest"
 )

@@ -101,10 +101,10 @@ func New(c rest.Interface) *AcidV1Client {
 }
 
 func setConfigDefaults(config *rest.Config) error {
-	gv := v1.SchemeGroupVersion
+	gv := acidzalandov1.SchemeGroupVersion
 	config.GroupVersion = &gv
 	config.APIPath = "/apis"
-	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
 
 	if config.UserAgent == "" {
 		config.UserAgent = rest.DefaultKubernetesUserAgent()

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -35,15 +35,15 @@ type FakeAcidV1 struct {
 }
 
 func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigurationInterface {
-	return &FakeOperatorConfigurations{c, namespace}
+	return newFakeOperatorConfigurations(c, namespace)
 }
 
 func (c *FakeAcidV1) PostgresTeams(namespace string) v1.PostgresTeamInterface {
-	return &FakePostgresTeams{c, namespace}
+	return newFakePostgresTeams(c, namespace)
 }
 
 func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface {
-	return &FakePostgresqls{c, namespace}
+	return newFakePostgresqls(c, namespace)
 }
 
 // RESTClient returns a RESTClient that is used to communicate

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -25,31 +25,26 @@ SOFTWARE.
 package fake
 
 import (
-	"context"
-
-	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	testing "k8s.io/client-go/testing"
+	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
+	gentype "k8s.io/client-go/gentype"
 )
 
-// FakeOperatorConfigurations implements OperatorConfigurationInterface
-type FakeOperatorConfigurations struct {
+// fakeOperatorConfigurations implements OperatorConfigurationInterface
+type fakeOperatorConfigurations struct {
+	*gentype.FakeClient[*v1.OperatorConfiguration]
 	Fake *FakeAcidV1
-	ns   string
 }
 
-var operatorconfigurationsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "operatorconfigurations"}
-
-var operatorconfigurationsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "OperatorConfiguration"}
-
-// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
-func (c *FakeOperatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(operatorconfigurationsResource, c.ns, name), &acidzalandov1.OperatorConfiguration{})
-
-	if obj == nil {
-		return nil, err
+func newFakeOperatorConfigurations(fake *FakeAcidV1, namespace string) acidzalandov1.OperatorConfigurationInterface {
+	return &fakeOperatorConfigurations{
+		gentype.NewFakeClient[*v1.OperatorConfiguration](
+			fake.Fake,
+			namespace,
+			v1.SchemeGroupVersion.WithResource("operatorconfigurations"),
+			v1.SchemeGroupVersion.WithKind("OperatorConfiguration"),
+			func() *v1.OperatorConfiguration { return &v1.OperatorConfiguration{} },
+		),
+		fake,
 	}
-	return obj.(*acidzalandov1.OperatorConfiguration), err
 }

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -25,124 +25,30 @@ SOFTWARE.
 package fake
 
 import (
-	"context"
-
-	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
+	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
+	gentype "k8s.io/client-go/gentype"
 )
 
-// FakePostgresqls implements PostgresqlInterface
-type FakePostgresqls struct {
+// fakePostgresqls implements PostgresqlInterface
+type fakePostgresqls struct {
+	*gentype.FakeClientWithList[*v1.Postgresql, *v1.PostgresqlList]
 	Fake *FakeAcidV1
-	ns   string
 }
 
-var postgresqlsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresqls"}
-
-var postgresqlsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "Postgresql"}
-
-// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
-func (c *FakePostgresqls) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.Postgresql), err
-}
-
-// List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
-func (c *FakePostgresqls) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewListAction(postgresqlsResource, postgresqlsKind, c.ns, opts), &acidzalandov1.PostgresqlList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &acidzalandov1.PostgresqlList{ListMeta: obj.(*acidzalandov1.PostgresqlList).ListMeta}
-	for _, item := range obj.(*acidzalandov1.PostgresqlList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested postgresqls.
-func (c *FakePostgresqls) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewWatchAction(postgresqlsResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *FakePostgresqls) Create(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.CreateOptions) (result *acidzalandov1.Postgresql, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.Postgresql), err
-}
-
-// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *FakePostgresqls) Update(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (result *acidzalandov1.Postgresql, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.Postgresql), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakePostgresqls) UpdateStatus(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (*acidzalandov1.Postgresql, error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateSubresourceAction(postgresqlsResource, "status", c.ns, postgresql), &acidzalandov1.Postgresql{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.Postgresql), err
-}
-
-// Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
-func (c *FakePostgresqls) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewDeleteActionWithOptions(postgresqlsResource, c.ns, name, opts), &acidzalandov1.Postgresql{})
-
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakePostgresqls) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOpts)
-
-	_, err := c.Fake.Invokes(action, &acidzalandov1.PostgresqlList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched postgresql.
-func (c *FakePostgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.Postgresql, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(postgresqlsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.Postgresql{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.Postgresql), err
+func newFakePostgresqls(fake *FakeAcidV1, namespace string) acidzalandov1.PostgresqlInterface {
+	return &fakePostgresqls{
+		gentype.NewFakeClientWithList[*v1.Postgresql, *v1.PostgresqlList](
+			fake.Fake,
+			namespace,
+			v1.SchemeGroupVersion.WithResource("postgresqls"),
+			v1.SchemeGroupVersion.WithKind("Postgresql"),
+			func() *v1.Postgresql { return &v1.Postgresql{} },
+			func() *v1.PostgresqlList { return &v1.PostgresqlList{} },
+			func(dst, src *v1.PostgresqlList) { dst.ListMeta = src.ListMeta },
+			func(list *v1.PostgresqlList) []*v1.Postgresql { return gentype.ToPointerSlice(list.Items) },
+			func(list *v1.PostgresqlList, items []*v1.Postgresql) { list.Items = gentype.FromPointerSlice(items) },
+		),
+		fake,
+	}
 }

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -25,112 +25,32 @@ SOFTWARE.
 package fake
 
 import (
-	"context"
-
-	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
+	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
+	gentype "k8s.io/client-go/gentype"
 )
 
-// FakePostgresTeams implements PostgresTeamInterface
-type FakePostgresTeams struct {
+// fakePostgresTeams implements PostgresTeamInterface
+type fakePostgresTeams struct {
+	*gentype.FakeClientWithList[*v1.PostgresTeam, *v1.PostgresTeamList]
 	Fake *FakeAcidV1
-	ns   string
 }
 
-var postgresteamsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresteams"}
-
-var postgresteamsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "PostgresTeam"}
-
-// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any.
-func (c *FakePostgresTeams) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.PostgresTeam, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.PostgresTeam), err
-}
-
-// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors.
-func (c *FakePostgresTeams) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresTeamList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewListAction(postgresteamsResource, postgresteamsKind, c.ns, opts), &acidzalandov1.PostgresTeamList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &acidzalandov1.PostgresTeamList{ListMeta: obj.(*acidzalandov1.PostgresTeamList).ListMeta}
-	for _, item := range obj.(*acidzalandov1.PostgresTeamList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested postgresTeams.
-func (c *FakePostgresTeams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewWatchAction(postgresteamsResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any.
-func (c *FakePostgresTeams) Create(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.CreateOptions) (result *acidzalandov1.PostgresTeam, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*acidzalandov1.PostgresTeam), err
-}
+func newFakePostgresTeams(fake *FakeAcidV1, namespace string) acidzalandov1.PostgresTeamInterface {
+	return &fakePostgresTeams{
+		gentype.NewFakeClientWithList[*v1.PostgresTeam, *v1.PostgresTeamList](
+			fake.Fake,
+			namespace,
+			v1.SchemeGroupVersion.WithResource("postgresteams"),
+			v1.SchemeGroupVersion.WithKind("PostgresTeam"),
+			func() *v1.PostgresTeam { return &v1.PostgresTeam{} },
+			func() *v1.PostgresTeamList { return &v1.PostgresTeamList{} },
+			func(dst, src *v1.PostgresTeamList) { dst.ListMeta = src.ListMeta },
+			func(list *v1.PostgresTeamList) []*v1.PostgresTeam { return gentype.ToPointerSlice(list.Items) },
+			func(list *v1.PostgresTeamList, items []*v1.PostgresTeam) {
+				list.Items = gentype.FromPointerSlice(items)
+			},
+		),
+		fake,
+	}
+}
// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any.
|
|
||||||
func (c *FakePostgresTeams) Update(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.UpdateOptions) (result *acidzalandov1.PostgresTeam, err error) {
|
|
||||||
obj, err := c.Fake.
|
|
||||||
Invokes(testing.NewUpdateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{})
|
|
||||||
|
|
||||||
if obj == nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return obj.(*acidzalandov1.PostgresTeam), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs.
|
|
||||||
func (c *FakePostgresTeams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
|
||||||
_, err := c.Fake.
|
|
||||||
Invokes(testing.NewDeleteActionWithOptions(postgresteamsResource, c.ns, name, opts), &acidzalandov1.PostgresTeam{})
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteCollection deletes a collection of objects.
|
|
||||||
func (c *FakePostgresTeams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
|
||||||
action := testing.NewDeleteCollectionAction(postgresteamsResource, c.ns, listOpts)
|
|
||||||
|
|
||||||
_, err := c.Fake.Invokes(action, &acidzalandov1.PostgresTeamList{})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Patch applies the patch and returns the patched postgresTeam.
|
|
||||||
func (c *FakePostgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.PostgresTeam, err error) {
|
|
||||||
obj, err := c.Fake.
|
|
||||||
Invokes(testing.NewPatchSubresourceAction(postgresteamsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.PostgresTeam{})
|
|
||||||
|
|
||||||
if obj == nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return obj.(*acidzalandov1.PostgresTeam), err
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
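Not part of the diff above: a minimal sketch of how the regenerated gentype-based fakes are typically driven from a unit test. The fake clientset package path and the NewSimpleClientset constructor follow the standard client-gen layout and are assumptions here, not something shown in this compare view.

```go
package fake_test

import (
	"context"
	"testing"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	fakeclientset "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" // assumed standard generated path
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFakePostgresTeamGet(t *testing.T) {
	// Seed the in-memory object tracker; the gentype-based fake registered by
	// newFakePostgresTeams serves the typed calls against it.
	seed := &acidv1.PostgresTeam{
		ObjectMeta: metav1.ObjectMeta{Name: "teamapi", Namespace: "default"},
	}
	client := fakeclientset.NewSimpleClientset(seed)

	got, err := client.AcidV1().PostgresTeams("default").Get(context.TODO(), "teamapi", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != seed.Name {
		t.Fatalf("expected %q, got %q", seed.Name, got.Name)
	}
}
```

The test exercises exactly the PostgresTeamInterface methods that the hand-rolled FakePostgresTeams used to implement and that gentype.FakeClientWithList now provides.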
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,12 +25,12 @@ SOFTWARE.
 package v1

 import (
-	"context"
+	context "context"

 	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )

 // OperatorConfigurationsGetter has a method to return a OperatorConfigurationInterface.
@@ -41,33 +41,24 @@ type OperatorConfigurationsGetter interface {

 // OperatorConfigurationInterface has methods to work with OperatorConfiguration resources.
 type OperatorConfigurationInterface interface {
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error)
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*acidzalandov1.OperatorConfiguration, error)
 	OperatorConfigurationExpansion
 }

 // operatorConfigurations implements OperatorConfigurationInterface
 type operatorConfigurations struct {
-	client rest.Interface
+	*gentype.Client[*acidzalandov1.OperatorConfiguration]
-	ns     string
 }

 // newOperatorConfigurations returns a OperatorConfigurations
 func newOperatorConfigurations(c *AcidV1Client, namespace string) *operatorConfigurations {
 	return &operatorConfigurations{
-		client: c.RESTClient(),
-		ns:     namespace,
+		gentype.NewClient[*acidzalandov1.OperatorConfiguration](
+			"operatorconfigurations",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			namespace,
+			func() *acidzalandov1.OperatorConfiguration { return &acidzalandov1.OperatorConfiguration{} },
+		),
 	}
 }
-
-// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
-func (c *operatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
-	result = &acidzalandov1.OperatorConfiguration{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("operatorconfigurations").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,15 +25,14 @@ SOFTWARE.
 package v1

 import (
-	"context"
+	context "context"
-	"time"

-	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )

 // PostgresqlsGetter has a method to return a PostgresqlInterface.
@@ -44,158 +43,34 @@ type PostgresqlsGetter interface {

 // PostgresqlInterface has methods to work with Postgresql resources.
 type PostgresqlInterface interface {
-	Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (*v1.Postgresql, error)
+	Create(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts metav1.CreateOptions) (*acidzalandov1.Postgresql, error)
-	Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error)
+	Update(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts metav1.UpdateOptions) (*acidzalandov1.Postgresql, error)
-	UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error)
+	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+	UpdateStatus(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts metav1.UpdateOptions) (*acidzalandov1.Postgresql, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
-	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Postgresql, error)
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*acidzalandov1.Postgresql, error)
-	List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresqlList, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*acidzalandov1.PostgresqlList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *acidzalandov1.Postgresql, err error)
 	PostgresqlExpansion
 }

 // postgresqls implements PostgresqlInterface
 type postgresqls struct {
-	client rest.Interface
+	*gentype.ClientWithList[*acidzalandov1.Postgresql, *acidzalandov1.PostgresqlList]
-	ns     string
 }

 // newPostgresqls returns a Postgresqls
 func newPostgresqls(c *AcidV1Client, namespace string) *postgresqls {
 	return &postgresqls{
-		client: c.RESTClient(),
-		ns:     namespace,
+		gentype.NewClientWithList[*acidzalandov1.Postgresql, *acidzalandov1.PostgresqlList](
+			"postgresqls",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			namespace,
+			func() *acidzalandov1.Postgresql { return &acidzalandov1.Postgresql{} },
+			func() *acidzalandov1.PostgresqlList { return &acidzalandov1.PostgresqlList{} },
+		),
 	}
 }
-
-// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
-func (c *postgresqls) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Postgresql, err error) {
-	result = &v1.Postgresql{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
-func (c *postgresqls) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresqlList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.PostgresqlList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested postgresqls.
-func (c *postgresqls) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *postgresqls) Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (result *v1.Postgresql, err error) {
-	result = &v1.Postgresql{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(postgresql).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *postgresqls) Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) {
-	result = &v1.Postgresql{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		Name(postgresql.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(postgresql).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *postgresqls) UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) {
-	result = &v1.Postgresql{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		Name(postgresql.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(postgresql).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
-func (c *postgresqls) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *postgresqls) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("postgresqls").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched postgresql.
-func (c *postgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error) {
-	result = &v1.Postgresql{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("postgresqls").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
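For orientation, and not part of this compare view: a minimal sketch of what a caller of the regenerated typed client looks like. The call sites stay the same because gentype only replaces the hand-rolled REST plumbing behind the unchanged PostgresqlInterface. The kubeconfig path below is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig; the path is illustrative only.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// The generated constructor wires in the gentype-based typed clients.
	clientset, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List is now served by gentype.ClientWithList under the hood,
	// but this call compiles and behaves exactly as before.
	pgs, err := clientset.AcidV1().Postgresqls("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pg := range pgs.Items {
		fmt.Println(pg.Name)
	}
}
```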
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,15 +25,14 @@ SOFTWARE.
 package v1

 import (
-	"context"
+	context "context"
-	"time"

-	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )

 // PostgresTeamsGetter has a method to return a PostgresTeamInterface.
@@ -44,141 +43,32 @@ type PostgresTeamsGetter interface {

 // PostgresTeamInterface has methods to work with PostgresTeam resources.
 type PostgresTeamInterface interface {
-	Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (*v1.PostgresTeam, error)
+	Create(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts metav1.CreateOptions) (*acidzalandov1.PostgresTeam, error)
-	Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (*v1.PostgresTeam, error)
+	Update(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts metav1.UpdateOptions) (*acidzalandov1.PostgresTeam, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
-	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PostgresTeam, error)
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*acidzalandov1.PostgresTeam, error)
-	List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresTeamList, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*acidzalandov1.PostgresTeamList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *acidzalandov1.PostgresTeam, err error)
 	PostgresTeamExpansion
 }

 // postgresTeams implements PostgresTeamInterface
 type postgresTeams struct {
-	client rest.Interface
+	*gentype.ClientWithList[*acidzalandov1.PostgresTeam, *acidzalandov1.PostgresTeamList]
-	ns     string
 }

 // newPostgresTeams returns a PostgresTeams
 func newPostgresTeams(c *AcidV1Client, namespace string) *postgresTeams {
 	return &postgresTeams{
-		client: c.RESTClient(),
-		ns:     namespace,
+		gentype.NewClientWithList[*acidzalandov1.PostgresTeam, *acidzalandov1.PostgresTeamList](
+			"postgresteams",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			namespace,
+			func() *acidzalandov1.PostgresTeam { return &acidzalandov1.PostgresTeam{} },
+			func() *acidzalandov1.PostgresTeamList { return &acidzalandov1.PostgresTeamList{} },
+		),
 	}
 }
-
-// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any.
-func (c *postgresTeams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PostgresTeam, err error) {
-	result = &v1.PostgresTeam{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors.
-func (c *postgresTeams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresTeamList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.PostgresTeamList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested postgresTeams.
-func (c *postgresTeams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any.
-func (c *postgresTeams) Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (result *v1.PostgresTeam, err error) {
-	result = &v1.PostgresTeam{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(postgresTeam).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any.
-func (c *postgresTeams) Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (result *v1.PostgresTeam, err error) {
-	result = &v1.PostgresTeam{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		Name(postgresTeam.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(postgresTeam).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs.
-func (c *postgresTeams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *postgresTeams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("postgresteams").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched postgresTeam.
-func (c *postgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) {
-	result = &v1.PostgresTeam{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("postgresteams").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,15 +25,14 @@ SOFTWARE.
 package v1

 import (
-	"context"
+	context "context"
-	"time"

-	v1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
+	zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
 	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
+	gentype "k8s.io/client-go/gentype"
 )

 // FabricEventStreamsGetter has a method to return a FabricEventStreamInterface.
@@ -44,141 +43,32 @@ type FabricEventStreamsGetter interface {

 // FabricEventStreamInterface has methods to work with FabricEventStream resources.
 type FabricEventStreamInterface interface {
-	Create(ctx context.Context, fabricEventStream *v1.FabricEventStream, opts metav1.CreateOptions) (*v1.FabricEventStream, error)
+	Create(ctx context.Context, fabricEventStream *zalandoorgv1.FabricEventStream, opts metav1.CreateOptions) (*zalandoorgv1.FabricEventStream, error)
-	Update(ctx context.Context, fabricEventStream *v1.FabricEventStream, opts metav1.UpdateOptions) (*v1.FabricEventStream, error)
+	Update(ctx context.Context, fabricEventStream *zalandoorgv1.FabricEventStream, opts metav1.UpdateOptions) (*zalandoorgv1.FabricEventStream, error)
 	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
 	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
-	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FabricEventStream, error)
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*zalandoorgv1.FabricEventStream, error)
-	List(ctx context.Context, opts metav1.ListOptions) (*v1.FabricEventStreamList, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*zalandoorgv1.FabricEventStreamList, error)
 	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FabricEventStream, err error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *zalandoorgv1.FabricEventStream, err error)
 	FabricEventStreamExpansion
 }

 // fabricEventStreams implements FabricEventStreamInterface
 type fabricEventStreams struct {
-	client rest.Interface
+	*gentype.ClientWithList[*zalandoorgv1.FabricEventStream, *zalandoorgv1.FabricEventStreamList]
-	ns     string
 }

 // newFabricEventStreams returns a FabricEventStreams
 func newFabricEventStreams(c *ZalandoV1Client, namespace string) *fabricEventStreams {
 	return &fabricEventStreams{
-		client: c.RESTClient(),
-		ns:     namespace,
+		gentype.NewClientWithList[*zalandoorgv1.FabricEventStream, *zalandoorgv1.FabricEventStreamList](
+			"fabriceventstreams",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			namespace,
+			func() *zalandoorgv1.FabricEventStream { return &zalandoorgv1.FabricEventStream{} },
+			func() *zalandoorgv1.FabricEventStreamList { return &zalandoorgv1.FabricEventStreamList{} },
+		),
 	}
 }
-
-// Get takes name of the fabricEventStream, and returns the corresponding fabricEventStream object, and an error if there is any.
-func (c *fabricEventStreams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FabricEventStream, err error) {
-	result = &v1.FabricEventStream{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of FabricEventStreams that match those selectors.
-func (c *fabricEventStreams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FabricEventStreamList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1.FabricEventStreamList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested fabricEventStreams.
-func (c *fabricEventStreams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a fabricEventStream and creates it. Returns the server's representation of the fabricEventStream, and an error, if there is any.
-func (c *fabricEventStreams) Create(ctx context.Context, fabricEventStream *v1.FabricEventStream, opts metav1.CreateOptions) (result *v1.FabricEventStream, err error) {
-	result = &v1.FabricEventStream{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(fabricEventStream).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a fabricEventStream and updates it. Returns the server's representation of the fabricEventStream, and an error, if there is any.
-func (c *fabricEventStreams) Update(ctx context.Context, fabricEventStream *v1.FabricEventStream, opts metav1.UpdateOptions) (result *v1.FabricEventStream, err error) {
-	result = &v1.FabricEventStream{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		Name(fabricEventStream.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(fabricEventStream).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the fabricEventStream and deletes it. Returns an error if one occurs.
-func (c *fabricEventStreams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *fabricEventStreams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched fabricEventStream.
-func (c *fabricEventStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FabricEventStream, err error) {
-	result = &v1.FabricEventStream{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("fabriceventstreams").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,112 +25,34 @@ SOFTWARE.
 package fake

 import (
-	"context"
-
-	zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
+	v1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
+	zalandoorgv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/zalando.org/v1"
+	gentype "k8s.io/client-go/gentype"
 )

-// FakeFabricEventStreams implements FabricEventStreamInterface
+// fakeFabricEventStreams implements FabricEventStreamInterface
-type FakeFabricEventStreams struct {
+type fakeFabricEventStreams struct {
+	*gentype.FakeClientWithList[*v1.FabricEventStream, *v1.FabricEventStreamList]
 	Fake *FakeZalandoV1
-	ns   string
 }

-var fabriceventstreamsResource = schema.GroupVersionResource{Group: "zalando.org", Version: "v1", Resource: "fabriceventstreams"}
-
-var fabriceventstreamsKind = schema.GroupVersionKind{Group: "zalando.org", Version: "v1", Kind: "FabricEventStream"}
-
-// Get takes name of the fabricEventStream, and returns the corresponding fabricEventStream object, and an error if there is any.
-func (c *FakeFabricEventStreams) Get(ctx context.Context, name string, options v1.GetOptions) (result *zalandoorgv1.FabricEventStream, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewGetAction(fabriceventstreamsResource, c.ns, name), &zalandoorgv1.FabricEventStream{})
-
-	if obj == nil {
-		return nil, err
+func newFakeFabricEventStreams(fake *FakeZalandoV1, namespace string) zalandoorgv1.FabricEventStreamInterface {
+	return &fakeFabricEventStreams{
+		gentype.NewFakeClientWithList[*v1.FabricEventStream, *v1.FabricEventStreamList](
+			fake.Fake,
+			namespace,
+			v1.SchemeGroupVersion.WithResource("fabriceventstreams"),
+			v1.SchemeGroupVersion.WithKind("FabricEventStream"),
+			func() *v1.FabricEventStream { return &v1.FabricEventStream{} },
+			func() *v1.FabricEventStreamList { return &v1.FabricEventStreamList{} },
+			func(dst, src *v1.FabricEventStreamList) { dst.ListMeta = src.ListMeta },
+			func(list *v1.FabricEventStreamList) []*v1.FabricEventStream {
+				return gentype.ToPointerSlice(list.Items)
+			},
+			func(list *v1.FabricEventStreamList, items []*v1.FabricEventStream) {
+				list.Items = gentype.FromPointerSlice(items)
+			},
+		),
+		fake,
 	}
-	return obj.(*zalandoorgv1.FabricEventStream), err
-}
-
-// List takes label and field selectors, and returns the list of FabricEventStreams that match those selectors.
-func (c *FakeFabricEventStreams) List(ctx context.Context, opts v1.ListOptions) (result *zalandoorgv1.FabricEventStreamList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewListAction(fabriceventstreamsResource, fabriceventstreamsKind, c.ns, opts), &zalandoorgv1.FabricEventStreamList{})
-
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &zalandoorgv1.FabricEventStreamList{ListMeta: obj.(*zalandoorgv1.FabricEventStreamList).ListMeta}
-	for _, item := range obj.(*zalandoorgv1.FabricEventStreamList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested fabricEventStreams.
-func (c *FakeFabricEventStreams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewWatchAction(fabriceventstreamsResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a fabricEventStream and creates it. Returns the server's representation of the fabricEventStream, and an error, if there is any.
-func (c *FakeFabricEventStreams) Create(ctx context.Context, fabricEventStream *zalandoorgv1.FabricEventStream, opts v1.CreateOptions) (result *zalandoorgv1.FabricEventStream, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewCreateAction(fabriceventstreamsResource, c.ns, fabricEventStream), &zalandoorgv1.FabricEventStream{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*zalandoorgv1.FabricEventStream), err
-}
-
-// Update takes the representation of a fabricEventStream and updates it. Returns the server's representation of the fabricEventStream, and an error, if there is any.
-func (c *FakeFabricEventStreams) Update(ctx context.Context, fabricEventStream *zalandoorgv1.FabricEventStream, opts v1.UpdateOptions) (result *zalandoorgv1.FabricEventStream, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewUpdateAction(fabriceventstreamsResource, c.ns, fabricEventStream), &zalandoorgv1.FabricEventStream{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*zalandoorgv1.FabricEventStream), err
-}
-
-// Delete takes name of the fabricEventStream and deletes it. Returns an error if one occurs.
-func (c *FakeFabricEventStreams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewDeleteActionWithOptions(fabriceventstreamsResource, c.ns, name, opts), &zalandoorgv1.FabricEventStream{})
-
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeFabricEventStreams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewDeleteCollectionAction(fabriceventstreamsResource, c.ns, listOpts)
-
-	_, err := c.Fake.Invokes(action, &zalandoorgv1.FabricEventStreamList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched fabricEventStream.
-func (c *FakeFabricEventStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *zalandoorgv1.FabricEventStream, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(fabriceventstreamsResource, c.ns, name, pt, data, subresources...), &zalandoorgv1.FabricEventStream{})
-
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*zalandoorgv1.FabricEventStream), err
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -35,7 +35,7 @@ type FakeZalandoV1 struct {
 }

 func (c *FakeZalandoV1) FabricEventStreams(namespace string) v1.FabricEventStreamInterface {
-	return &FakeFabricEventStreams{c, namespace}
+	return newFakeFabricEventStreams(c, namespace)
 }

 // RESTClient returns a RESTClient that is used to communicate
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,10 +25,10 @@ SOFTWARE.
 package v1

 import (
-	"net/http"
+	http "net/http"

-	v1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
+	zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
-	"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
+	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	rest "k8s.io/client-go/rest"
 )

@@ -91,10 +91,10 @@ func New(c rest.Interface) *ZalandoV1Client {
 }

 func setConfigDefaults(config *rest.Config) error {
-	gv := v1.SchemeGroupVersion
+	gv := zalandoorgv1.SchemeGroupVersion
 	config.GroupVersion = &gv
 	config.APIPath = "/apis"
-	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()

 	if config.UserAgent == "" {
 		config.UserAgent = rest.DefaultKubernetesUserAgent()
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,13 +25,13 @@ SOFTWARE.
 package v1

 import (
-	"context"
+	context "context"
 	time "time"

-	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	apisacidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
 	internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
-	v1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1"
+	acidzalandov1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
@@ -42,7 +42,7 @@ import (
 // Postgresqls.
 type PostgresqlInformer interface {
 	Informer() cache.SharedIndexInformer
-	Lister() v1.PostgresqlLister
+	Lister() acidzalandov1.PostgresqlLister
 }

 type postgresqlInformer struct {
@@ -77,7 +77,7 @@ func NewFilteredPostgresqlInformer(client versioned.Interface, namespace string,
 				return client.AcidV1().Postgresqls(namespace).Watch(context.TODO(), options)
 			},
 		},
-		&acidzalandov1.Postgresql{},
+		&apisacidzalandov1.Postgresql{},
 		resyncPeriod,
 		indexers,
 	)
@@ -88,9 +88,9 @@ func (f *postgresqlInformer) defaultInformer(client versioned.Interface, resyncP
 }

 func (f *postgresqlInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&acidzalandov1.Postgresql{}, f.defaultInformer)
+	return f.factory.InformerFor(&apisacidzalandov1.Postgresql{}, f.defaultInformer)
 }

 func (f *postgresqlInformer) Lister() v1.PostgresqlLister {
-	return v1.NewPostgresqlLister(f.Informer().GetIndexer())
+	return acidzalandov1.NewPostgresqlLister(f.Informer().GetIndexer())
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,13 +25,13 @@ SOFTWARE.
 package v1

 import (
-    "context"
+    context "context"
     time "time"

-    acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+    apisacidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
     versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
     internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
-    v1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1"
+    acidzalandov1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
     watch "k8s.io/apimachinery/pkg/watch"
@@ -42,7 +42,7 @@ import (
 // PostgresTeams.
 type PostgresTeamInformer interface {
     Informer() cache.SharedIndexInformer
-    Lister() v1.PostgresTeamLister
+    Lister() acidzalandov1.PostgresTeamLister
 }

 type postgresTeamInformer struct {
@@ -77,7 +77,7 @@ func NewFilteredPostgresTeamInformer(client versioned.Interface, namespace strin
                 return client.AcidV1().PostgresTeams(namespace).Watch(context.TODO(), options)
             },
         },
-        &acidzalandov1.PostgresTeam{},
+        &apisacidzalandov1.PostgresTeam{},
         resyncPeriod,
         indexers,
     )
@@ -88,9 +88,9 @@ func (f *postgresTeamInformer) defaultInformer(client versioned.Interface, resyn
 }

 func (f *postgresTeamInformer) Informer() cache.SharedIndexInformer {
-    return f.factory.InformerFor(&acidzalandov1.PostgresTeam{}, f.defaultInformer)
+    return f.factory.InformerFor(&apisacidzalandov1.PostgresTeam{}, f.defaultInformer)
 }

-func (f *postgresTeamInformer) Lister() v1.PostgresTeamLister {
-    return v1.NewPostgresTeamLister(f.Informer().GetIndexer())
+func (f *postgresTeamInformer) Lister() acidzalandov1.PostgresTeamLister {
+    return acidzalandov1.NewPostgresTeamLister(f.Informer().GetIndexer())
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -49,11 +49,17 @@ type sharedInformerFactory struct {
     lock          sync.Mutex
     defaultResync time.Duration
     customResync  map[reflect.Type]time.Duration
+    transform     cache.TransformFunc

     informers map[reflect.Type]cache.SharedIndexInformer
     // startedInformers is used for tracking which informers have been started.
     // This allows Start() to be called multiple times safely.
     startedInformers map[reflect.Type]bool
+    // wg tracks how many goroutines were started.
+    wg sync.WaitGroup
+    // shuttingDown is true when Shutdown has been called. It may still be running
+    // because it needs to wait for goroutines.
+    shuttingDown bool
 }

 // WithCustomResyncConfig sets a custom resync period for the specified informer types.
@@ -82,6 +88,14 @@ func WithNamespace(namespace string) SharedInformerOption {
     }
 }

+// WithTransform sets a transform on all informers.
+func WithTransform(transform cache.TransformFunc) SharedInformerOption {
+    return func(factory *sharedInformerFactory) *sharedInformerFactory {
+        factory.transform = transform
+        return factory
+    }
+}
+
 // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
 func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
     return NewSharedInformerFactoryWithOptions(client, defaultResync)
@@ -114,20 +128,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
     return factory
 }

-// Start initializes all requested informers.
 func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
     f.lock.Lock()
     defer f.lock.Unlock()

+    if f.shuttingDown {
+        return
+    }
+
     for informerType, informer := range f.informers {
         if !f.startedInformers[informerType] {
-            go informer.Run(stopCh)
+            f.wg.Add(1)
+            // We need a new variable in each loop iteration,
+            // otherwise the goroutine would use the loop variable
+            // and that keeps changing.
+            informer := informer
+            go func() {
+                defer f.wg.Done()
+                informer.Run(stopCh)
+            }()
             f.startedInformers[informerType] = true
         }
     }
 }

-// WaitForCacheSync waits for all started informers' cache were synced.
+func (f *sharedInformerFactory) Shutdown() {
+    f.lock.Lock()
+    f.shuttingDown = true
+    f.lock.Unlock()
+
+    // Will return immediately if there is nothing to wait for.
+    f.wg.Wait()
+}
+
 func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
     informers := func() map[reflect.Type]cache.SharedIndexInformer {
         f.lock.Lock()
@@ -149,7 +182,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref
         return res
     }

-// InternalInformerFor returns the SharedIndexInformer for obj using an internal
+// InformerFor returns the SharedIndexInformer for obj using an internal
 // client.
 func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
     f.lock.Lock()
@@ -167,6 +200,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
     }

     informer = newFunc(f.client, resyncPeriod)
+    informer.SetTransform(f.transform)
     f.informers[informerType] = informer

     return informer
@@ -174,11 +208,59 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal

 // SharedInformerFactory provides shared informers for resources in all known
 // API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.Background()
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.WaitForStop() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Creating informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
 type SharedInformerFactory interface {
     internalinterfaces.SharedInformerFactory
-    ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+    // Start initializes all requested informers. They are handled in goroutines
+    // which run until the stop channel gets closed.
+    // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
+    Start(stopCh <-chan struct{})
+
+    // Shutdown marks a factory as shutting down. At that point no new
+    // informers can be started anymore and Start will return without
+    // doing anything.
+    //
+    // In addition, Shutdown blocks until all goroutines have terminated. For that
+    // to happen, the close channel(s) that they were started with must be closed,
+    // either before Shutdown gets called or while it is waiting.
+    //
+    // Shutdown may be called multiple times, even concurrently. All such calls will
+    // block until all goroutines have terminated.
+    Shutdown()
+
+    // WaitForCacheSync blocks until all started informers' caches were synced
+    // or the stop channel gets closed.
     WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+    // ForResource gives generic access to a shared informer of the matching type.
+    ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+    // InformerFor returns the SharedIndexInformer for obj using an internal
+    // client.
+    InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
     Acid() acidzalando.Interface
     Zalando() zalandoorg.Interface
 }
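The doc comment above describes the intended lifecycle of the regenerated factory. Below is a minimal sketch of that flow for this operator's clientset, not code from the repository: the accessor chain factory.Acid().V1().Postgresqls() follows the usual informer-gen layout and is an assumption here, and the resync period is arbitrary. The new WithTransform option can be passed to NewSharedInformerFactoryWithOptions in the same way.

package example

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"

	"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
	"github.com/zalando/postgres-operator/pkg/generated/informers/externalversions"
)

// watchPostgresqls shows the Start / WaitForCacheSync / Shutdown sequence
// introduced in this change; error handling is kept minimal on purpose.
func watchPostgresqls(cfg *rest.Config) error {
	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	factory := externalversions.NewSharedInformerFactory(client, 30*time.Minute)
	defer factory.Shutdown() // runs last: blocks until all informer goroutines have stopped
	defer cancel()           // runs first: closes the stop channel the informers run on

	pgInformer := factory.Acid().V1().Postgresqls() // accessor chain assumed, see note above

	factory.Start(ctx.Done()) // non-blocking; each informer goroutine is tracked by the factory
	for typ, ok := range factory.WaitForCacheSync(ctx.Done()) {
		if !ok {
			return fmt.Errorf("cache failed to sync: %v", typ)
		}
	}

	pgs, err := pgInformer.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("found %d postgresql manifests\n", len(pgs))
	return nil
}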
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,7 +25,7 @@ SOFTWARE.
 package externalversions

 import (
-    "fmt"
+    fmt "fmt"

     v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
     zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,13 +25,13 @@ SOFTWARE.
 package v1

 import (
-    "context"
+    context "context"
     time "time"

-    zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
+    apiszalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
     versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
     internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
-    v1 "github.com/zalando/postgres-operator/pkg/generated/listers/zalando.org/v1"
+    zalandoorgv1 "github.com/zalando/postgres-operator/pkg/generated/listers/zalando.org/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
     watch "k8s.io/apimachinery/pkg/watch"
@@ -42,7 +42,7 @@ import (
 // FabricEventStreams.
 type FabricEventStreamInformer interface {
     Informer() cache.SharedIndexInformer
-    Lister() v1.FabricEventStreamLister
+    Lister() zalandoorgv1.FabricEventStreamLister
 }

 type fabricEventStreamInformer struct {
@@ -77,7 +77,7 @@ func NewFilteredFabricEventStreamInformer(client versioned.Interface, namespace
                 return client.ZalandoV1().FabricEventStreams(namespace).Watch(context.TODO(), options)
             },
         },
-        &zalandoorgv1.FabricEventStream{},
+        &apiszalandoorgv1.FabricEventStream{},
         resyncPeriod,
         indexers,
     )
@@ -88,9 +88,9 @@ func (f *fabricEventStreamInformer) defaultInformer(client versioned.Interface,
 }

 func (f *fabricEventStreamInformer) Informer() cache.SharedIndexInformer {
-    return f.factory.InformerFor(&zalandoorgv1.FabricEventStream{}, f.defaultInformer)
+    return f.factory.InformerFor(&apiszalandoorgv1.FabricEventStream{}, f.defaultInformer)
 }

-func (f *fabricEventStreamInformer) Lister() v1.FabricEventStreamLister {
-    return v1.NewFabricEventStreamLister(f.Informer().GetIndexer())
+func (f *fabricEventStreamInformer) Lister() zalandoorgv1.FabricEventStreamLister {
+    return zalandoorgv1.NewFabricEventStreamLister(f.Informer().GetIndexer())
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,10 +25,10 @@ SOFTWARE.
 package v1

 import (
-    v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/client-go/tools/cache"
+    acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+    labels "k8s.io/apimachinery/pkg/labels"
+    listers "k8s.io/client-go/listers"
+    cache "k8s.io/client-go/tools/cache"
 )

 // PostgresqlLister helps list Postgresqls.
@@ -36,7 +36,7 @@ import (
 type PostgresqlLister interface {
     // List lists all Postgresqls in the indexer.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.Postgresql, err error)
+    List(selector labels.Selector) (ret []*acidzalandov1.Postgresql, err error)
     // Postgresqls returns an object that can list and get Postgresqls.
     Postgresqls(namespace string) PostgresqlNamespaceLister
     PostgresqlListerExpansion
@@ -44,25 +44,17 @@ type PostgresqlLister interface {

 // postgresqlLister implements the PostgresqlLister interface.
 type postgresqlLister struct {
-    indexer cache.Indexer
+    listers.ResourceIndexer[*acidzalandov1.Postgresql]
 }

 // NewPostgresqlLister returns a new PostgresqlLister.
 func NewPostgresqlLister(indexer cache.Indexer) PostgresqlLister {
-    return &postgresqlLister{indexer: indexer}
-}
-
-// List lists all Postgresqls in the indexer.
-func (s *postgresqlLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) {
-    err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.Postgresql))
-    })
-    return ret, err
+    return &postgresqlLister{listers.New[*acidzalandov1.Postgresql](indexer, acidzalandov1.Resource("postgresql"))}
 }

 // Postgresqls returns an object that can list and get Postgresqls.
 func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceLister {
-    return postgresqlNamespaceLister{indexer: s.indexer, namespace: namespace}
+    return postgresqlNamespaceLister{listers.NewNamespaced[*acidzalandov1.Postgresql](s.ResourceIndexer, namespace)}
 }

 // PostgresqlNamespaceLister helps list and get Postgresqls.
@@ -70,36 +62,15 @@ func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceList
 type PostgresqlNamespaceLister interface {
     // List lists all Postgresqls in the indexer for a given namespace.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.Postgresql, err error)
+    List(selector labels.Selector) (ret []*acidzalandov1.Postgresql, err error)
     // Get retrieves the Postgresql from the indexer for a given namespace and name.
     // Objects returned here must be treated as read-only.
-    Get(name string) (*v1.Postgresql, error)
+    Get(name string) (*acidzalandov1.Postgresql, error)
     PostgresqlNamespaceListerExpansion
 }

 // postgresqlNamespaceLister implements the PostgresqlNamespaceLister
 // interface.
 type postgresqlNamespaceLister struct {
-    indexer   cache.Indexer
-    namespace string
-}
-
-// List lists all Postgresqls in the indexer for a given namespace.
-func (s postgresqlNamespaceLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) {
-    err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.Postgresql))
-    })
-    return ret, err
-}
-
-// Get retrieves the Postgresql from the indexer for a given namespace and name.
-func (s postgresqlNamespaceLister) Get(name string) (*v1.Postgresql, error) {
-    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-    if err != nil {
-        return nil, err
-    }
-    if !exists {
-        return nil, errors.NewNotFound(v1.Resource("postgresql"), name)
-    }
-    return obj.(*v1.Postgresql), nil
+    listers.ResourceIndexer[*acidzalandov1.Postgresql]
 }
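The same rewrite is applied to the PostgresTeam and FabricEventStream listers further down: the hand-written List/Get bodies disappear and the generated types simply embed the generic ResourceIndexer from k8s.io/client-go/listers. A rough sketch of what that delegation looks like for a caller, using only names that appear in this diff (illustrative, not repository code):

package example

import (
	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	"k8s.io/apimachinery/pkg/labels"
	listers "k8s.io/client-go/listers"
	"k8s.io/client-go/tools/cache"
)

// listFromIndexer shows the generic helper the regenerated lister now wraps:
// listers.New provides List/Get for any object type, so the per-type
// boilerplate (cache.ListAll, errors.NewNotFound, ...) is no longer generated.
func listFromIndexer(indexer cache.Indexer) ([]*acidzalandov1.Postgresql, error) {
	lister := listers.New[*acidzalandov1.Postgresql](indexer, acidzalandov1.Resource("postgresql"))
	return lister.List(labels.Everything())
}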
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,10 +25,10 @@ SOFTWARE.
 package v1

 import (
-    v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/client-go/tools/cache"
+    acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+    labels "k8s.io/apimachinery/pkg/labels"
+    listers "k8s.io/client-go/listers"
+    cache "k8s.io/client-go/tools/cache"
 )

 // PostgresTeamLister helps list PostgresTeams.
@@ -36,7 +36,7 @@ import (
 type PostgresTeamLister interface {
     // List lists all PostgresTeams in the indexer.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.PostgresTeam, err error)
+    List(selector labels.Selector) (ret []*acidzalandov1.PostgresTeam, err error)
     // PostgresTeams returns an object that can list and get PostgresTeams.
     PostgresTeams(namespace string) PostgresTeamNamespaceLister
     PostgresTeamListerExpansion
@@ -44,25 +44,17 @@ type PostgresTeamLister interface {

 // postgresTeamLister implements the PostgresTeamLister interface.
 type postgresTeamLister struct {
-    indexer cache.Indexer
+    listers.ResourceIndexer[*acidzalandov1.PostgresTeam]
 }

 // NewPostgresTeamLister returns a new PostgresTeamLister.
 func NewPostgresTeamLister(indexer cache.Indexer) PostgresTeamLister {
-    return &postgresTeamLister{indexer: indexer}
-}
-
-// List lists all PostgresTeams in the indexer.
-func (s *postgresTeamLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) {
-    err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.PostgresTeam))
-    })
-    return ret, err
+    return &postgresTeamLister{listers.New[*acidzalandov1.PostgresTeam](indexer, acidzalandov1.Resource("postgresteam"))}
 }

 // PostgresTeams returns an object that can list and get PostgresTeams.
 func (s *postgresTeamLister) PostgresTeams(namespace string) PostgresTeamNamespaceLister {
-    return postgresTeamNamespaceLister{indexer: s.indexer, namespace: namespace}
+    return postgresTeamNamespaceLister{listers.NewNamespaced[*acidzalandov1.PostgresTeam](s.ResourceIndexer, namespace)}
 }

 // PostgresTeamNamespaceLister helps list and get PostgresTeams.
@@ -70,36 +62,15 @@ func (s *postgresTeamLister) PostgresTeams(namespace string) PostgresTeamNamespa
 type PostgresTeamNamespaceLister interface {
     // List lists all PostgresTeams in the indexer for a given namespace.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.PostgresTeam, err error)
+    List(selector labels.Selector) (ret []*acidzalandov1.PostgresTeam, err error)
     // Get retrieves the PostgresTeam from the indexer for a given namespace and name.
     // Objects returned here must be treated as read-only.
-    Get(name string) (*v1.PostgresTeam, error)
+    Get(name string) (*acidzalandov1.PostgresTeam, error)
     PostgresTeamNamespaceListerExpansion
 }

 // postgresTeamNamespaceLister implements the PostgresTeamNamespaceLister
 // interface.
 type postgresTeamNamespaceLister struct {
-    indexer   cache.Indexer
-    namespace string
-}
-
-// List lists all PostgresTeams in the indexer for a given namespace.
-func (s postgresTeamNamespaceLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) {
-    err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.PostgresTeam))
-    })
-    return ret, err
-}
-
-// Get retrieves the PostgresTeam from the indexer for a given namespace and name.
-func (s postgresTeamNamespaceLister) Get(name string) (*v1.PostgresTeam, error) {
-    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-    if err != nil {
-        return nil, err
-    }
-    if !exists {
-        return nil, errors.NewNotFound(v1.Resource("postgresteam"), name)
-    }
-    return obj.(*v1.PostgresTeam), nil
+    listers.ResourceIndexer[*acidzalandov1.PostgresTeam]
 }
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2026 Compose, Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -25,10 +25,10 @@ SOFTWARE.
 package v1

 import (
-    v1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
-    "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/client-go/tools/cache"
+    zalandoorgv1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
+    labels "k8s.io/apimachinery/pkg/labels"
+    listers "k8s.io/client-go/listers"
+    cache "k8s.io/client-go/tools/cache"
 )

 // FabricEventStreamLister helps list FabricEventStreams.
@@ -36,7 +36,7 @@ import (
 type FabricEventStreamLister interface {
     // List lists all FabricEventStreams in the indexer.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.FabricEventStream, err error)
+    List(selector labels.Selector) (ret []*zalandoorgv1.FabricEventStream, err error)
     // FabricEventStreams returns an object that can list and get FabricEventStreams.
     FabricEventStreams(namespace string) FabricEventStreamNamespaceLister
     FabricEventStreamListerExpansion
@@ -44,25 +44,17 @@ type FabricEventStreamLister interface {

 // fabricEventStreamLister implements the FabricEventStreamLister interface.
 type fabricEventStreamLister struct {
-    indexer cache.Indexer
+    listers.ResourceIndexer[*zalandoorgv1.FabricEventStream]
 }

 // NewFabricEventStreamLister returns a new FabricEventStreamLister.
 func NewFabricEventStreamLister(indexer cache.Indexer) FabricEventStreamLister {
-    return &fabricEventStreamLister{indexer: indexer}
-}
-
-// List lists all FabricEventStreams in the indexer.
-func (s *fabricEventStreamLister) List(selector labels.Selector) (ret []*v1.FabricEventStream, err error) {
-    err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.FabricEventStream))
-    })
-    return ret, err
+    return &fabricEventStreamLister{listers.New[*zalandoorgv1.FabricEventStream](indexer, zalandoorgv1.Resource("fabriceventstream"))}
 }

 // FabricEventStreams returns an object that can list and get FabricEventStreams.
 func (s *fabricEventStreamLister) FabricEventStreams(namespace string) FabricEventStreamNamespaceLister {
-    return fabricEventStreamNamespaceLister{indexer: s.indexer, namespace: namespace}
+    return fabricEventStreamNamespaceLister{listers.NewNamespaced[*zalandoorgv1.FabricEventStream](s.ResourceIndexer, namespace)}
 }

 // FabricEventStreamNamespaceLister helps list and get FabricEventStreams.
@@ -70,36 +62,15 @@ func (s *fabricEventStreamLister) FabricEventStreams(namespace string) FabricEve
 type FabricEventStreamNamespaceLister interface {
     // List lists all FabricEventStreams in the indexer for a given namespace.
     // Objects returned here must be treated as read-only.
-    List(selector labels.Selector) (ret []*v1.FabricEventStream, err error)
+    List(selector labels.Selector) (ret []*zalandoorgv1.FabricEventStream, err error)
     // Get retrieves the FabricEventStream from the indexer for a given namespace and name.
     // Objects returned here must be treated as read-only.
-    Get(name string) (*v1.FabricEventStream, error)
+    Get(name string) (*zalandoorgv1.FabricEventStream, error)
     FabricEventStreamNamespaceListerExpansion
 }

 // fabricEventStreamNamespaceLister implements the FabricEventStreamNamespaceLister
 // interface.
 type fabricEventStreamNamespaceLister struct {
-    indexer   cache.Indexer
-    namespace string
-}
-
-// List lists all FabricEventStreams in the indexer for a given namespace.
-func (s fabricEventStreamNamespaceLister) List(selector labels.Selector) (ret []*v1.FabricEventStream, err error) {
-    err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
-        ret = append(ret, m.(*v1.FabricEventStream))
-    })
-    return ret, err
-}
-
-// Get retrieves the FabricEventStream from the indexer for a given namespace and name.
-func (s fabricEventStreamNamespaceLister) Get(name string) (*v1.FabricEventStream, error) {
-    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
-    if err != nil {
-        return nil, err
-    }
-    if !exists {
-        return nil, errors.NewNotFound(v1.Resource("fabriceventstream"), name)
-    }
-    return obj.(*v1.FabricEventStream), nil
+    listers.ResourceIndexer[*zalandoorgv1.FabricEventStream]
 }
@@ -14,8 +14,39 @@ import (
     "k8s.io/client-go/rest"
 )

-// NamespacedName describes the namespace/name pairs used in Kubernetes names.
-type NamespacedName types.NamespacedName
+// NamespacedName comprises a resource name, with a mandatory namespace,
+// rendered as "<namespace>/<name>". Being a type captures intent and
+// helps make sure that UIDs, namespaced names and non-namespaced names
+// do not get conflated in code. For most use cases, namespace and name
+// will already have been format validated at the API entry point, so we
+// don't do that here. Where that's not the case (e.g. in testing),
+// consider using NamespacedNameOrDie() in testing.go in this package.
+//
+// from: https://github.com/kubernetes/apimachinery/blob/master/pkg/types/namespacedname.go
+type NamespacedName struct {
+    Namespace string `json:"namespace,omitempty"`
+    Name      string `json:"name"`
+}
+
+const (
+    Separator = '/'
+)
+
+// String returns the general purpose string representation
+func (n NamespacedName) String() string {
+    return n.Namespace + string(Separator) + n.Name
+}
+
+// MarshalLog emits a struct containing required key/value pair
+func (n NamespacedName) MarshalLog() interface{} {
+    return struct {
+        Name      string `json:"name"`
+        Namespace string `json:"namespace,omitempty"`
+    }{
+        Name:      n.Name,
+        Namespace: n.Namespace,
+    }
+}

 const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
@@ -131,10 +162,6 @@ type ControllerConfig struct {
 // cached value for the GetOperatorNamespace
 var operatorNamespace string

-func (n NamespacedName) String() string {
-    return types.NamespacedName(n).String()
-}
-
 // MarshalJSON defines marshaling rule for the namespaced name type.
 func (n NamespacedName) MarshalJSON() ([]byte, error) {
     return []byte("\"" + n.String() + "\""), nil
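For context, the relocated NamespacedName keeps the operator's existing "namespace/name" rendering; a small sketch of the observable behaviour (the pkg/spec import path is assumed, not taken from this diff):

package example

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/spec"
)

func printNamespacedName() {
	n := spec.NamespacedName{Namespace: "default", Name: "acid-minimal-cluster"}

	fmt.Println(n.String()) // default/acid-minimal-cluster

	// MarshalJSON (unchanged above) still renders the same string, quoted.
	if b, err := n.MarshalJSON(); err == nil {
		fmt.Println(string(b)) // "default/acid-minimal-cluster"
	}
}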
@@ -44,6 +44,14 @@ func (ths *teamHashSet) toMap() map[string][]string {
     return newTeamMap
 }

+func mapStringSliceToStringSliceMap[T ~[]string](input map[string]T) map[string][]string {
+    output := make(map[string][]string)
+    for k, v := range input {
+        output[k] = []string(v)
+    }
+    return output
+}
+
 func (ths *teamHashSet) mergeCrdMap(crdTeamMap map[string][]string) {
     for t, at := range crdTeamMap {
         ths.add(t, at)
@@ -110,9 +118,9 @@ func (ptm *PostgresTeamMap) Load(pgTeams *acidv1.PostgresTeamList) {
     teamIDs := make(map[string]struct{})

     for _, pgTeam := range pgTeams.Items {
-        superuserTeamSet.mergeCrdMap(pgTeam.Spec.AdditionalSuperuserTeams)
-        teamSet.mergeCrdMap(pgTeam.Spec.AdditionalTeams)
-        teamMemberSet.mergeCrdMap(pgTeam.Spec.AdditionalMembers)
+        superuserTeamSet.mergeCrdMap(mapStringSliceToStringSliceMap(pgTeam.Spec.AdditionalSuperuserTeams))
+        teamSet.mergeCrdMap(mapStringSliceToStringSliceMap(pgTeam.Spec.AdditionalTeams))
+        teamMemberSet.mergeCrdMap(mapStringSliceToStringSliceMap(pgTeam.Spec.AdditionalMembers))
     }
     fetchTeams(&teamIDs, superuserTeamSet)
     fetchTeams(&teamIDs, teamSet)
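A self-contained sketch of the generic conversion helper added above; the local Teams type stands in for the named slice types (acidv1.SuperUserTeams, acidv1.Teams, acidv1.Users) that the CRD spec now uses in the test change below, all of which are assumed to have []string as their underlying type:

package main

import "fmt"

// Same shape as the helper in the diff: any named type whose underlying type
// is []string satisfies the ~[]string constraint.
func mapStringSliceToStringSliceMap[T ~[]string](input map[string]T) map[string][]string {
	output := make(map[string][]string)
	for k, v := range input {
		output[k] = []string(v)
	}
	return output
}

type Teams []string // stand-in for acidv1.Teams

func main() {
	additional := map[string]Teams{"teamA": {"teamC"}, "teamB": {}}
	fmt.Println(mapStringSliceToStringSliceMap(additional)) // map[teamA:[teamC] teamB:[]]
}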
@@ -24,9 +24,9 @@ var (
                 Name: "teamAB",
             },
             Spec: acidv1.PostgresTeamSpec{
-                AdditionalSuperuserTeams: map[string][]string{"teamA": []string{"teamB", "team24x7"}, "teamB": []string{"teamA", "teamC", "team24x7"}},
-                AdditionalTeams:          map[string][]string{"teamA": []string{"teamC"}, "teamB": []string{}},
-                AdditionalMembers:        map[string][]string{"team24x7": []string{"optimusprime"}, "teamB": []string{"drno"}},
+                AdditionalSuperuserTeams: map[string]acidv1.SuperUserTeams{"teamA": []string{"teamB", "team24x7"}, "teamB": []string{"teamA", "teamC", "team24x7"}},
+                AdditionalTeams:          map[string]acidv1.Teams{"teamA": []string{"teamC"}, "teamB": []string{}},
+                AdditionalMembers:        map[string]acidv1.Users{"team24x7": []string{"optimusprime"}, "teamB": []string{"drno"}},
             },
         }, {
             TypeMeta: metav1.TypeMeta{
@@ -37,9 +37,9 @@ var (
                 Name: "teamC",
             },
             Spec: acidv1.PostgresTeamSpec{
-                AdditionalSuperuserTeams: map[string][]string{"teamC": []string{"team24x7"}},
-                AdditionalTeams:          map[string][]string{"teamA": []string{"teamC"}, "teamC": []string{"teamA", "teamB", "acid"}},
-                AdditionalMembers:        map[string][]string{"acid": []string{"batman"}},
+                AdditionalSuperuserTeams: map[string]acidv1.SuperUserTeams{"teamC": []string{"team24x7"}},
+                AdditionalTeams:          map[string]acidv1.Teams{"teamA": []string{"teamC"}, "teamC": []string{"teamA", "teamB", "acid"}},
+                AdditionalMembers:        map[string]acidv1.Users{"acid": []string{"batman"}},
             },
         },
         {
@@ -51,9 +51,9 @@ var (
                 Name: "teamD",
             },
             Spec: acidv1.PostgresTeamSpec{
-                AdditionalSuperuserTeams: map[string][]string{},
-                AdditionalTeams:          map[string][]string{"teamA": []string{"teamD"}, "teamC": []string{"teamD"}, "teamD": []string{"teamA", "teamB", "teamC"}},
-                AdditionalMembers:        map[string][]string{"acid": []string{"batman"}},
+                AdditionalSuperuserTeams: map[string]acidv1.SuperUserTeams{},
+                AdditionalTeams:          map[string]acidv1.Teams{"teamA": []string{"teamD"}, "teamC": []string{"teamD"}, "teamD": []string{"teamA", "teamB", "teamC"}},
+                AdditionalMembers:        map[string]acidv1.Users{"acid": []string{"batman"}},
             },
         },
     },
@@ -66,6 +66,8 @@ type Resources struct {
     MaxInstances int32 `name:"max_instances" default:"-1"`
     MinInstances int32 `name:"min_instances" default:"-1"`
     IgnoreInstanceLimitsAnnotationKey string `name:"ignore_instance_limits_annotation_key"`
+
+    IgnoreResourcesLimitsAnnotationKey string `name:"ignore_resources_limits_annotation_key"`
 }

 type InfrastructureRole struct {
@@ -1,6 +1,6 @@
 package httpclient

-//go:generate mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor
+//go:generate go tool mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE

 import "net/http"
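The same go:generate change appears for the volumes package further down. `go tool <name>` resolves the generator through the module's tool dependencies (a Go 1.24+ feature), so go.mod is expected to carry a matching tool directive; the module path below is an assumption, not something shown in this diff:

// go.mod excerpt (sketch; the exact mockgen module path is assumed)
tool go.uber.org/mock/mockgen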
@@ -191,24 +191,8 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
 }

 // SetPostgresCRDStatus of Postgres cluster
-func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) {
-    var pg *apiacidv1.Postgresql
-    var pgStatus apiacidv1.PostgresStatus
-    pgStatus.PostgresClusterStatus = status
-
-    patch, err := json.Marshal(struct {
-        PgStatus interface{} `json:"status"`
-    }{&pgStatus})
-
-    if err != nil {
-        return pg, fmt.Errorf("could not marshal status: %v", err)
-    }
-
-    // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
-    // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11)
-    // we should take advantage of it.
-    pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch(
-        context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
+func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pg *apiacidv1.Postgresql) (*apiacidv1.Postgresql, error) {
+    pg, err := client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).UpdateStatus(context.TODO(), pg, metav1.UpdateOptions{})
     if err != nil {
         return pg, fmt.Errorf("could not update status: %v", err)
     }
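With the new signature the caller prepares the desired status on the Postgresql object itself and SetPostgresCRDStatus persists it via UpdateStatus against the /status subresource. A hedged sketch of the calling convention (the status value and package paths are assumptions, not taken from this diff):

package example

import (
	apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
)

// setCreating shows how a caller would now set the cluster status: mutate a
// copy that already carries a valid resourceVersion, then hand it over.
func setCreating(client *k8sutil.KubernetesClient, name spec.NamespacedName, current *apiacidv1.Postgresql) (*apiacidv1.Postgresql, error) {
	pg := current.DeepCopy()
	pg.Status.PostgresClusterStatus = "Creating" // real callers would use the acidv1 status constants
	return client.SetPostgresCRDStatus(name, pg)
}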
@@ -1,6 +1,6 @@
 package volumes

-//go:generate mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor
+//go:generate go tool mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE

 import v1 "k8s.io/api/core/v1"
@@ -98,7 +98,7 @@ function build_operator_binary(){

     # redirecting stderr greatly reduces non-informative output during normal builds
     echo "Build operator binary (stderr redirected to /dev/null)..."
-    make clean deps local test > /dev/null 2>&1
+    make clean local test > /dev/null 2>&1

 }
@@ -11,4 +11,4 @@ kubernetes==11.0.0
 python-json-logger==2.0.7
 requests==2.32.4
 stups-tokens>=1.1.19
-werkzeug==3.1.4
+werkzeug==3.1.5