diff --git a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md new file mode 100644 index 000000000..a4dec9409 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md @@ -0,0 +1,19 @@ +--- +name: Postgres Operator issue template +about: How are you using the operator? +title: '' +labels: '' +assignees: '' + +--- + +Please answer a few short questions to help us understand your problem / question better: + +- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.0 +- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s] +- **Are you running Postgres Operator in production?** [yes | no] +- **Type of issue?** [Bug report, question, feature request, etc.] + +Some general remarks when posting a bug report: +- Please check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines, please put them in a separate GitHub gist together with your Postgres CRD and configuration manifest. +- If you feel this issue is more related to the [Spilo](https://github.com/zalando/spilo/issues) docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening an issue in the respective repo. diff --git a/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md b/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md new file mode 100644 index 000000000..78ebc4993 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md @@ -0,0 +1,18 @@ +## Problem description + + + +## Linked issues + + + +## Checklist + +Thanks for submitting a pull request to the Postgres Operator project. +Please ensure your contribution matches the following items: + +- [ ] Your go code is [formatted](https://blog.golang.org/gofmt). Your IDE should do it automatically for you. +- [ ] You have updated [generated code](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#code-generation) when introducing new fields to the `acid.zalan.do` api package. +- [ ] New [configuration options](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#introduce-additional-configuration-parameters) are reflected in CRD validation, helm charts and sample manifests. +- [ ] New functionality is covered by [unit](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#unit-tests) and/or [e2e](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#end-to-end-tests) tests. +- [ ] You have checked existing open PRs for possible overlap and referenced them. diff --git a/.github/workflows/run_e2e.yaml b/.github/workflows/run_e2e.yaml new file mode 100644 index 000000000..cff0d49ef --- /dev/null +++ b/.github/workflows/run_e2e.yaml @@ -0,0 +1,25 @@ +name: operator-e2e-tests + +on: + pull_request: + push: + branches: + - master + +jobs: + tests: + name: End-2-End tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.6" + - name: Make dependencies + run: make deps mocks + - name: Compile + run: make linux + - name: Run unit tests + run: go test ./... 
+ - name: Run end-2-end tests + run: make e2e diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml new file mode 100644 index 000000000..ebcdfedf6 --- /dev/null +++ b/.github/workflows/run_tests.yaml @@ -0,0 +1,30 @@ +name: operator-tests + +on: + pull_request: + push: + branches: + - master + +jobs: + tests: + name: Unit tests and coverage + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.6" + - name: Make dependencies + run: make deps mocks + - name: Compile + run: make linux + - name: Run unit tests + run: go test -race -covermode atomic -coverprofile=coverage.out ./... + - name: Convert coverage to lcov + uses: jandelgado/gcov2lcov-action@v1.0.5 + - name: Coveralls + uses: coverallsapp/github-action@master + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + path-to-lcov: coverage.lcov diff --git a/.gitignore b/.gitignore index 0fdb50756..e062f8479 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ _testmain.go /docker/build/ /github.com/ .idea +.vscode scm-source.json @@ -47,6 +48,8 @@ __pycache__/ # Distribution / packaging .Python +ui/app/node_modules +ui/operator_ui/static/build build/ develop-eggs/ dist/ @@ -95,3 +98,5 @@ e2e/manifests # Translations *.mo *.pot + +mocks diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 091c875ba..000000000 --- a/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -dist: trusty -sudo: false - -branches: - only: - - master - -language: go - -go: - - "1.14.x" - -before_install: - - go get github.com/mattn/goveralls - -install: - - make deps - -script: - - hack/verify-codegen.sh - - travis_wait 20 goveralls -service=travis-ci -package ./pkg/... -v - - make e2e diff --git a/CODEOWNERS b/CODEOWNERS index 96fe74510..398856c66 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # global owners -* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih +* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih diff --git a/MAINTAINERS b/MAINTAINERS index 4f4ca87ba..572e6d971 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,3 +1,5 @@ -Oleksii Kliukin Dmitrii Dolgov Sergey Dudoladov +Felix Kunde +Jan Mussler +Rafia Sabih \ No newline at end of file diff --git a/Makefile b/Makefile index 69dd240db..fe2387670 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: clean local test linux macos docker push scm-source.json e2e +.PHONY: clean local test linux macos mocks docker push scm-source.json e2e BINARY ?= postgres-operator BUILD_FLAGS ?= -v @@ -24,12 +24,16 @@ PKG := `go list ./... | grep -v /vendor/` ifeq ($(DEBUG),1) DOCKERFILE = DebugDockerfile - DEBUG_POSTFIX := -debug + DEBUG_POSTFIX := -debug-$(shell date +"%H%M%S") BUILD_FLAGS += -gcflags "-N -l" else DOCKERFILE = Dockerfile endif +ifeq ($(FRESH),1) + DEBUG_FRESH=$(shell date +"%H-%M-%S") +endif + ifdef CDP_PULL_REQUEST_NUMBER CDP_TAG := -${CDP_BUILD_VERSION} endif @@ -66,7 +70,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker-context echo "Version ${VERSION}" echo "CDP tag ${CDP_TAG}" echo "git describe $(shell git describe --tags --always --dirty)" - cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . + cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . 
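For illustration, a usage sketch of the new Makefile switches above (the exact image tag also depends on TAG and CDP_TAG):

    # DEBUG=1 selects the DebugDockerfile and a time-stamped -debug tag postfix;
    # FRESH=1 additionally stamps the tag with the build time, so repeated
    # debug builds do not overwrite each other
    make docker DEBUG=1 FRESH=1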
indocker-race: docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux" @@ -77,9 +81,12 @@ push: scm-source.json: .git echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json +mocks: + GO111MODULE=on go generate ./... + tools: - GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck - GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.3 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.19.3 + GO111MODULE=on go get github.com/golang/mock/mockgen@v1.4.4 GO111MODULE=on go mod tidy fmt: @@ -97,4 +104,4 @@ test: GO111MODULE=on go test ./... e2e: docker # build operator image to be tested - cd e2e; make tools e2etest clean + cd e2e; make e2etest \ No newline at end of file diff --git a/README.md b/README.md index 564bb68a1..7edb60d84 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,34 @@ # Postgres Operator -[![Build Status](https://travis-ci.org/zalando/postgres-operator.svg?branch=master)](https://travis-ci.org/zalando/postgres-operator) -[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg)](https://coveralls.io/github/zalando/postgres-operator) -[![Go Report Card](https://goreportcard.com/badge/github.com/zalando/postgres-operator)](https://goreportcard.com/report/github.com/zalando/postgres-operator) -[![GoDoc](https://godoc.org/github.com/zalando/postgres-operator?status.svg)](https://godoc.org/github.com/zalando/postgres-operator) -[![golangci](https://golangci.com/badges/github.com/zalando/postgres-operator.svg)](https://golangci.com/r/github.com/zalando/postgres-operator) +![Tests](https://github.com/zalando/postgres-operator/workflows/operator-tests/badge.svg) +![E2E Tests](https://github.com/zalando/postgres-operator/workflows/operator-e2e-tests/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg?branch=master)](https://coveralls.io/github/zalando/postgres-operator?branch=master) -The Postgres Operator enables highly-available [PostgreSQL](https://www.postgresql.org/) +The Postgres Operator delivers easy-to-run, highly-available [PostgreSQL](https://www.postgresql.org/) clusters on Kubernetes (K8s) powered by [Patroni](https://github.com/zalando/spilo). -It is configured only through manifests to ease integration into automated CI/CD -pipelines with no access to Kubernetes directly. +It is configured only through Postgres manifests (CRDs) to ease integration into automated CI/CD +pipelines with no direct access to the Kubernetes API, promoting infrastructure as code over manual operations. ### Operator features -* Rolling updates on Postgres cluster changes -* Volume resize without Pod restarts -* Database connection pooler -* Cloning Postgres clusters -* Logical backups to S3 Bucket +* Rolling updates on Postgres cluster changes, incl. quick minor version updates +* Live volume resize without pod restarts (AWS EBS, PVC) +* Database connection pooler with PgBouncer +* Restoring and cloning Postgres clusters (incl. 
major version upgrade) +* Additionally, logical backups to an S3 bucket can be configured +* Standby cluster from S3 WAL archive +* Configurable for non-cloud environments +* Basic credential and user management on K8s, easing application deployments +* Support for custom TLS certificates +* UI to create and edit Postgres cluster manifests +* Works well on Amazon AWS, Google Cloud, OpenShift and locally on Kind +* Base support for AWS EBS gp3 migration (iops, throughput pending) ### PostgreSQL features -* Supports PostgreSQL 9.6+ +* Supports PostgreSQL versions 9.5 through 13 * Streaming replication cluster via Patroni * Point-In-Time-Recovery with [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) / @@ -48,13 +50,35 @@ pipelines with no access to Kubernetes directly. [timescaledb](https://github.com/timescale/timescaledb) The Postgres Operator has been developed at Zalando and is being used in -production for over two years. +production for over three years. + +## Notes on Postgres 13 support + +If you are new to the operator, you can skip this section and start using the Postgres Operator as is; Postgres 13 is ready to go. + +The Postgres Operator supports Postgres 13 with the new Spilo image, which also includes a recent Patroni version needed for PG13 settings. +More work on optimizing restarts and rolling upgrades is pending. + +If you are already running an older version of the Postgres Operator with a Spilo 12 Docker image, be aware of the changes to the backup path. +We introduce the major version into the backup path to smooth the [major version upgrade](docs/administrator.md#minor-and-major-version-upgrade), which is now supported manually. + +The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for WAL segments in the current path but also in the old-format paths. +This comes at a potential performance cost and should be disabled after a few days. + +The new Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p2` + +The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5` + ## Getting started For a quick first impression follow the instructions of this [tutorial](docs/quickstart.md). +## Supported setups of Postgres and Applications + +![Features](docs/diagrams/neutral_operator.png) + ## Documentation There is a browser-friendly version of this documentation at @@ -70,12 +94,6 @@ There is a browser-friendly version of this documentation at * [Postgres manifest reference](docs/reference/cluster_manifest.md) * [Command-line options and environment variables](docs/reference/command_line_and_environment.md) -## Google Summer of Code - -The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)! -Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019) -and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).
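For illustration, a minimal OperatorConfiguration sketch enabling the compatibility flag described above; the field names come from the CRD schema in this change, while the manifest name is a placeholder:

    apiVersion: acid.zalan.do/v1
    kind: OperatorConfiguration
    metadata:
      name: postgresql-operator-configuration  # placeholder name
    configuration:
      docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
      # temporary compatibility mode so Spilo also checks the old WAL path;
      # disable again after a few days
      enable_spilo_wal_path_compat: true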
- ## Community There are two places to get in touch with the community: diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index 13550d67e..9be6c84dd 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator-ui -version: 1.5.0 -appVersion: 1.5.0 +version: 1.6.0 +appVersion: 1.6.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml index 2da53497d..948a52274 100644 --- a/charts/postgres-operator-ui/index.yaml +++ b/charts/postgres-operator-ui/index.yaml @@ -2,11 +2,34 @@ apiVersion: v1 entries: postgres-operator-ui: - apiVersion: v1 - appVersion: 1.5.0 - created: "2020-05-08T12:07:31.651762139+02:00" + appVersion: 1.6.0 + created: "2020-12-18T14:19:25.464717041+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: d7a36de8a3716f7b7954e2e51ed07863ea764dbb129a2fd3ac6a453f9e98a115 + digest: d7813a235dd1015377c38fd5a14e7679a411c7340a25cfcf5f5294405f9a2eb2 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - ui + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator-ui + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-ui-1.6.0.tgz + version: 1.6.0 + - apiVersion: v1 + appVersion: 1.5.0 + created: "2020-12-18T14:19:25.464015993+01:00" + description: Postgres Operator UI provides a graphical interface for a convenient + database-as-a-service user experience + digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -24,29 +47,4 @@ entries: urls: - postgres-operator-ui-1.5.0.tgz version: 1.5.0 - - apiVersion: v1 - appVersion: 1.4.0 - created: "2020-05-08T12:07:31.651223951+02:00" - description: Postgres Operator UI provides a graphical interface for a convenient - database-as-a-service user experience - digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - ui - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - - email: sk@sik-net.de - name: siku4 - name: postgres-operator-ui - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-ui-1.4.0.tgz - version: 1.4.0 -generated: "2020-05-08T12:07:31.650495247+02:00" +generated: "2020-12-18T14:19:25.463104102+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz deleted file mode 100644 index 8d1276dd1..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz index 4443e15bc..d8527f293 100644 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz and b/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz 
b/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz new file mode 100644 index 000000000..68a43b51b Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz differ diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml index 00610c799..29bf2e670 100644 --- a/charts/postgres-operator-ui/templates/deployment.yaml +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -21,6 +21,10 @@ spec: team: "acid" # Parameterize? spec: serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} containers: - name: "service" image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" @@ -46,7 +50,7 @@ spec: - name: "RESOURCES_VISIBLE" value: "{{ .Values.envs.resourcesVisible }}" - name: "TARGET_NAMESPACE" - value: {{ .Values.envs.targetNamespace }} + value: "{{ .Values.envs.targetNamespace }}" - name: "TEAMS" value: |- [ @@ -64,10 +68,8 @@ spec: "resources_visible": true, "users_visible": true, "postgresql_versions": [ + "13", "12", - "11", - "10", - "9.6", - "9.5" + "11" ] } diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index 2fdb8f894..dea5007c9 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -8,9 +8,15 @@ replicaCount: 1 image: registry: registry.opensource.zalan.do repository: acid/postgres-operator-ui - tag: v1.5.0-dirty + tag: v1.6.0 pullPolicy: "IfNotPresent" +# Optionally specify an array of imagePullSecrets. +# Secrets must be manually created in the namespace. +# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +# imagePullSecrets: +# - name: + rbac: # Specifies whether RBAC resources should be created create: true @@ -43,7 +49,7 @@ envs: # configure UI service service: type: "ClusterIP" - port: "8081" + port: "80" # If the type of the service is NodePort a port can be specified using the nodePort field # If the nodePort field is not specified, or if it has no value, then a random port is used # notePort: 32521 diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index cd9f75586..e5a66b6e3 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator -version: 1.5.0 -appVersion: 1.5.0 +version: 1.6.0 +appVersion: 1.6.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index ffcef7b4a..2ac9ca7fc 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: operatorconfigurations.acid.zalan.do @@ -15,365 +15,441 @@ spec: singular: operatorconfiguration shortNames: - opconfig - additionalPrinterColumns: - - name: Image - type: string - description: Spilo image to be used for Pods - JSONPath: .configuration.docker_image - - name: Cluster-Label - type: string - description: Label for K8s resources created by 
operator - JSONPath: .configuration.kubernetes.cluster_name_label - - name: Service-Account - type: string - description: Name of service account to be used - JSONPath: .configuration.kubernetes.pod_service_account_name - - name: Min-Instances - type: integer - description: Minimum number of instances per Postgres cluster - JSONPath: .configuration.min_instances - - name: Age - type: date - JSONPath: .metadata.creationTimestamp + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - configuration - properties: - kind: - type: string - enum: - - OperatorConfiguration - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - configuration: - type: object - properties: - docker_image: - type: string - enable_crd_validation: - type: boolean - enable_lazy_spilo_upgrade: - type: boolean - enable_shm_volume: - type: boolean - etcd_host: - type: string - kubernetes_use_configmaps: - type: boolean - max_instances: - type: integer - minimum: -1 # -1 = disabled - min_instances: - type: integer - minimum: -1 # -1 = disabled - resync_period: - type: string - repair_period: - type: string - set_memory_request_to_limit: - type: boolean - sidecar_docker_images: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: - type: object - additionalProperties: true - workers: - type: integer - minimum: 1 - users: - type: object - properties: - replication_username: - type: string - super_username: - type: string - kubernetes: - type: object - properties: - cluster_domain: - type: string - cluster_labels: - type: object - additionalProperties: - type: string - cluster_name_label: - type: string - custom_pod_annotations: - type: object - additionalProperties: - type: string - downscaler_annotations: - type: array - items: - type: string - enable_init_containers: - type: boolean - enable_pod_antiaffinity: - type: boolean - enable_pod_disruption_budget: - type: boolean - enable_sidecars: - type: boolean - infrastructure_roles_secret_name: - type: string - inherited_labels: - type: array - items: - type: string - master_pod_move_timeout: - type: string - node_readiness_label: - type: object - additionalProperties: - type: string - oauth_token_secret_name: - type: string - pdb_name_format: - type: string - pod_antiaffinity_topology_key: - type: string - pod_environment_configmap: - type: string - pod_management_policy: - type: string - enum: - - "ordered_ready" - - "parallel" - pod_priority_class_name: - type: string - pod_role_label: - type: string - pod_service_account_definition: - type: string - pod_service_account_name: - type: string - pod_service_account_role_binding_definition: - type: string - pod_terminate_grace_period: - type: string - secret_name_template: - type: string - spilo_fsgroup: - type: integer - spilo_privileged: - type: boolean - toleration: - type: object - additionalProperties: - type: string - watched_namespace: - type: string - postgres_pod_resources: - type: object - properties: - default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - min_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - min_memory_limit: - type: string - 
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - timeouts: - type: object - properties: - pod_label_wait_timeout: - type: string - pod_deletion_wait_timeout: - type: string - ready_wait_interval: - type: string - ready_wait_timeout: - type: string - resource_check_interval: - type: string - resource_check_timeout: - type: string - load_balancer: - type: object - properties: - custom_service_annotations: - type: object - additionalProperties: - type: string - db_hosted_zone: - type: string - enable_master_load_balancer: - type: boolean - enable_replica_load_balancer: - type: boolean - master_dns_name_format: - type: string - replica_dns_name_format: - type: string - aws_or_gcp: - type: object - properties: - additional_secret_mount: - type: string - additional_secret_mount_path: - type: string - aws_region: - type: string - kube_iam_role: - type: string - log_s3_bucket: - type: string - wal_s3_bucket: - type: string - logical_backup: - type: object - properties: - logical_backup_docker_image: - type: string - logical_backup_s3_access_key_id: - type: string - logical_backup_s3_bucket: - type: string - logical_backup_s3_endpoint: - type: string - logical_backup_s3_region: - type: string - logical_backup_s3_secret_access_key: - type: string - logical_backup_s3_sse: - type: string - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - debug: - type: object - properties: - debug_logging: - type: boolean - enable_database_access: - type: boolean - teams_api: - type: object - properties: - enable_admin_role_for_users: - type: boolean - enable_team_superuser: - type: boolean - enable_teams_api: - type: boolean - pam_configuration: - type: string - pam_role_name: - type: string - postgres_superuser_teams: - type: array - items: - type: string - protected_role_names: - type: array - items: - type: string - team_admin_role: - type: string - team_api_role_configuration: - type: object - additionalProperties: - type: string - teams_api_url: - type: string - logging_rest_api: - type: object - properties: - api_port: - type: integer - cluster_history_entries: - type: integer - ring_log_lines: - type: integer - scalyr: # deprecated - type: object - properties: - scalyr_api_key: - type: string - scalyr_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_image: - type: string - scalyr_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_server_url: - type: string - connection_pooler: - type: object - properties: - connection_pooler_schema: - type: string - #default: "pooler" - connection_pooler_user: - type: string - #default: "pooler" - connection_pooler_image: - type: string - #default: "registry.opensource.zalan.do/acid/pgbouncer" - connection_pooler_max_db_connections: - type: integer - #default: 60 - connection_pooler_mode: - type: string - enum: - - "session" - - "transaction" - #default: "transaction" - connection_pooler_number_of_instances: - type: integer - minimum: 2 - #default: 2 - connection_pooler_default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - #default: "1" - connection_pooler_default_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - #default: "500m" - connection_pooler_default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - 
#default: "100Mi" - connection_pooler_default_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - #default: "100Mi" - status: - type: object - additionalProperties: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: Spilo image to be used for Pods + jsonPath: .configuration.docker_image + - name: Cluster-Label + type: string + description: Label for K8s resources created by operator + jsonPath: .configuration.kubernetes.cluster_name_label + - name: Service-Account + type: string + description: Name of service account to be used + jsonPath: .configuration.kubernetes.pod_service_account_name + - name: Min-Instances + type: integer + description: Minimum number of instances per Postgres cluster + jsonPath: .configuration.min_instances + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - configuration + properties: + kind: type: string + enum: + - OperatorConfiguration + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + configuration: + type: object + properties: + docker_image: + type: string + enable_crd_validation: + type: boolean + enable_lazy_spilo_upgrade: + type: boolean + enable_pgversion_env_var: + type: boolean + enable_shm_volume: + type: boolean + enable_spilo_wal_path_compat: + type: boolean + etcd_host: + type: string + kubernetes_use_configmaps: + type: boolean + max_instances: + type: integer + minimum: -1 # -1 = disabled + min_instances: + type: integer + minimum: -1 # -1 = disabled + resync_period: + type: string + repair_period: + type: string + set_memory_request_to_limit: + type: boolean + sidecar_docker_images: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + workers: + type: integer + minimum: 1 + users: + type: object + properties: + replication_username: + type: string + super_username: + type: string + kubernetes: + type: object + properties: + cluster_domain: + type: string + cluster_labels: + type: object + additionalProperties: + type: string + cluster_name_label: + type: string + custom_pod_annotations: + type: object + additionalProperties: + type: string + delete_annotation_date_key: + type: string + delete_annotation_name_key: + type: string + downscaler_annotations: + type: array + items: + type: string + enable_init_containers: + type: boolean + enable_pod_antiaffinity: + type: boolean + enable_pod_disruption_budget: + type: boolean + enable_sidecars: + type: boolean + infrastructure_roles_secret_name: + type: string + infrastructure_roles_secrets: + type: array + nullable: true + items: + type: object + required: + - secretname + - userkey + - passwordkey + properties: + secretname: + type: string + userkey: + type: string + passwordkey: + type: string + rolekey: + type: string + defaultuservalue: + type: string + defaultrolevalue: + type: string + details: + type: string + template: + type: boolean + inherited_annotations: + type: array + items: + type: string + inherited_labels: + type: array + items: + type: string + master_pod_move_timeout: + type: string + node_readiness_label: + type: object + additionalProperties: + type: string + oauth_token_secret_name: + type: string + pdb_name_format: + type: string + pod_antiaffinity_topology_key: + type: string + pod_environment_configmap: + type: 
string + pod_environment_secret: + type: string + pod_management_policy: + type: string + enum: + - "ordered_ready" + - "parallel" + pod_priority_class_name: + type: string + pod_role_label: + type: string + pod_service_account_definition: + type: string + pod_service_account_name: + type: string + pod_service_account_role_binding_definition: + type: string + pod_terminate_grace_period: + type: string + secret_name_template: + type: string + spilo_runasuser: + type: integer + spilo_runasgroup: + type: integer + spilo_fsgroup: + type: integer + spilo_privileged: + type: boolean + storage_resize_mode: + type: string + enum: + - "ebs" + - "pvc" + - "off" + toleration: + type: object + additionalProperties: + type: string + watched_namespace: + type: string + postgres_pod_resources: + type: object + properties: + default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + timeouts: + type: object + properties: + pod_label_wait_timeout: + type: string + pod_deletion_wait_timeout: + type: string + ready_wait_interval: + type: string + ready_wait_timeout: + type: string + resource_check_interval: + type: string + resource_check_timeout: + type: string + load_balancer: + type: object + properties: + custom_service_annotations: + type: object + additionalProperties: + type: string + db_hosted_zone: + type: string + enable_master_load_balancer: + type: boolean + enable_replica_load_balancer: + type: boolean + external_traffic_policy: + type: string + enum: + - "Cluster" + - "Local" + master_dns_name_format: + type: string + replica_dns_name_format: + type: string + aws_or_gcp: + type: object + properties: + additional_secret_mount: + type: string + additional_secret_mount_path: + type: string + aws_region: + type: string + enable_ebs_gp3_migration: + type: boolean + enable_ebs_gp3_migration_max_size: + type: integer + gcp_credentials: + type: string + kube_iam_role: + type: string + log_s3_bucket: + type: string + wal_gs_bucket: + type: string + wal_s3_bucket: + type: string + logical_backup: + type: object + properties: + logical_backup_docker_image: + type: string + logical_backup_google_application_credentials: + type: string + logical_backup_provider: + type: string + logical_backup_s3_access_key_id: + type: string + logical_backup_s3_bucket: + type: string + logical_backup_s3_endpoint: + type: string + logical_backup_s3_region: + type: string + logical_backup_s3_secret_access_key: + type: string + logical_backup_s3_sse: + type: string + logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + debug: + type: object + properties: + debug_logging: + type: boolean + enable_database_access: + type: boolean + teams_api: + type: object + properties: + enable_admin_role_for_users: + type: boolean + enable_postgres_team_crd: + type: boolean + enable_postgres_team_crd_superusers: + type: boolean + enable_team_superuser: + type: boolean + enable_teams_api: + type: boolean + pam_configuration: + type: string + pam_role_name: + type: string + postgres_superuser_teams: + type: array + items: + type: string + protected_role_names: + 
type: array + items: + type: string + team_admin_role: + type: string + team_api_role_configuration: + type: object + additionalProperties: + type: string + teams_api_url: + type: string + logging_rest_api: + type: object + properties: + api_port: + type: integer + cluster_history_entries: + type: integer + ring_log_lines: + type: integer + scalyr: # deprecated + type: object + properties: + scalyr_api_key: + type: string + scalyr_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + scalyr_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + scalyr_image: + type: string + scalyr_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + scalyr_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + scalyr_server_url: + type: string + connection_pooler: + type: object + properties: + connection_pooler_schema: + type: string + #default: "pooler" + connection_pooler_user: + type: string + #default: "pooler" + connection_pooler_image: + type: string + #default: "registry.opensource.zalan.do/acid/pgbouncer" + connection_pooler_max_db_connections: + type: integer + #default: 60 + connection_pooler_mode: + type: string + enum: + - "session" + - "transaction" + #default: "transaction" + connection_pooler_number_of_instances: + type: integer + minimum: 2 + #default: 2 + connection_pooler_default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + #default: "1" + connection_pooler_default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + #default: "500m" + connection_pooler_default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + #default: "100Mi" + connection_pooler_default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + #default: "100Mi" + status: + type: object + additionalProperties: + type: string diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index fdbcf8304..13811936d 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: postgresqls.acid.zalan.do @@ -15,465 +15,557 @@ spec: singular: postgresql shortNames: - pg - additionalPrinterColumns: - - name: Team - type: string - description: Team responsible for Postgres CLuster - JSONPath: .spec.teamId - - name: Version - type: string - description: PostgreSQL version - JSONPath: .spec.postgresql.version - - name: Pods - type: integer - description: Number of Pods per Postgres cluster - JSONPath: .spec.numberOfInstances - - name: Volume - type: string - description: Size of the bound volume - JSONPath: .spec.volume.size - - name: CPU-Request - type: string - description: Requested CPU for Postgres containers - JSONPath: .spec.resources.requests.cpu - - name: Memory-Request - type: string - description: Requested memory for Postgres containers - JSONPath: .spec.resources.requests.memory - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - - name: Status - type: string - description: Current sync status of postgresql resource - JSONPath: .status.PostgresClusterStatus + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - spec - properties: - kind: - type: string - 
enum: - - postgresql - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - spec: - type: object - required: - - numberOfInstances - - teamId - - postgresql - properties: - additionalVolumes: - type: array - items: - type: object - required: - - name - - mountPath - - volumeSource - properties: - name: - type: string - mountPath: - type: string - targetContainers: - type: array - nullable: true - items: - type: string - volumeSource: - type: object - subPath: - type: string - allowedSourceRanges: - type: array - nullable: true - items: - type: string - pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' - clone: - type: object - required: - - cluster - properties: - cluster: - type: string - s3_endpoint: - type: string - s3_access_key_id: - type: string - s3_secret_access_key: - type: string - s3_force_path_style: - type: boolean - s3_wal_path: - type: string - timestamp: - type: string - pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' - # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC - # Example: 1996-12-19T16:39:57-08:00 - # Note: this field requires a timezone - uid: - format: uuid - type: string - connectionPooler: - type: object - properties: - dockerImage: - type: string - maxDBConnections: - type: integer - mode: - type: string - enum: - - "session" - - "transaction" - numberOfInstances: - type: integer - minimum: 2 - resources: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Team + type: string + description: Team responsible for Postgres CLuster + jsonPath: .spec.teamId + - name: Version + type: string + description: PostgreSQL version + jsonPath: .spec.postgresql.version + - name: Pods + type: integer + description: Number of Pods per Postgres cluster + jsonPath: .spec.numberOfInstances + - name: Volume + type: string + description: Size of the bound volume + jsonPath: .spec.volume.size + - name: CPU-Request + type: string + description: Requested CPU for Postgres containers + jsonPath: .spec.resources.requests.cpu + - name: Memory-Request + type: string + description: Requested memory for Postgres containers + jsonPath: .spec.resources.requests.memory + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + - name: Status + type: string + description: Current sync status of postgresql resource + jsonPath: .status.PostgresClusterStatus + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - postgresql + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + required: + - numberOfInstances + - teamId + - postgresql + - volume + properties: + additionalVolumes: + type: array + items: type: object required: - - requests - - limits + - name + - mountPath + - volumeSource properties: - limits: + name: + type: string + mountPath: + type: string + targetContainers: + type: array + nullable: true + items: + type: string + volumeSource: type: object - required: - - cpu - - memory - properties: - cpu: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - memory: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - requests: - type: object - required: - - 
cpu - - memory - properties: - cpu: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - memory: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - schema: - type: string - user: - type: string - databases: - type: object - additionalProperties: - type: string - # Note: usernames specified here as database owners must be declared in the users key of the spec key. - dockerImage: - type: string - enableConnectionPooler: - type: boolean - enableLogicalBackup: - type: boolean - enableMasterLoadBalancer: - type: boolean - enableReplicaLoadBalancer: - type: boolean - enableShmVolume: - type: boolean - init_containers: # deprecated - type: array - nullable: true - items: - type: object - additionalProperties: true - initContainers: - type: array - nullable: true - items: - type: object - additionalProperties: true - logicalBackupSchedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - maintenanceWindows: - type: array - items: - type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' - numberOfInstances: - type: integer - minimum: 0 - patroni: - type: object - properties: - initdb: - type: object - additionalProperties: - type: string - pg_hba: - type: array - items: - type: string - slots: - type: object - additionalProperties: - type: object - additionalProperties: + x-kubernetes-preserve-unknown-fields: true + subPath: type: string - ttl: - type: integer - loop_wait: - type: integer - retry_timeout: - type: integer - synchronous_mode: - type: boolean - synchronous_mode_strict: - type: boolean - maximum_lag_on_failover: - type: integer - podAnnotations: - type: object - additionalProperties: - type: string - pod_priority_class_name: # deprecated - type: string - podPriorityClassName: - type: string - postgresql: - type: object - required: - - version - properties: - version: - type: string - enum: - - "9.3" - - "9.4" - - "9.5" - - "9.6" - - "10" - - "11" - - "12" - parameters: - type: object - additionalProperties: - type: string - preparedDatabases: - type: object - additionalProperties: - type: object - properties: - defaultUsers: - type: boolean - extensions: - type: object - additionalProperties: - type: string - schemas: - type: object - additionalProperties: - type: object - properties: - defaultUsers: - type: boolean - defaultRoles: - type: boolean - replicaLoadBalancer: # deprecated - type: boolean - resources: - type: object - required: - - requests - - limits - properties: - limits: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. 
You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - requests: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - serviceAnnotations: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: - type: object - additionalProperties: true - spiloFSGroup: - type: integer - standby: - type: object - required: - - s3_wal_path - properties: - s3_wal_path: - type: string - teamId: - type: string - tls: - type: object - required: - - secretName - properties: - secretName: - type: string - certificateFile: - type: string - privateKeyFile: - type: string - caFile: - type: string - caSecretName: - type: string - tolerations: - type: array - items: - type: object - required: - - key - - operator - - effect - properties: - key: - type: string - operator: - type: string - enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - useLoadBalancer: # deprecated - type: boolean - users: - type: object - additionalProperties: + allowedSourceRanges: type: array nullable: true - description: "Role flags specified here must not contradict each other" items: type: string - enum: - - bypassrls - - BYPASSRLS - - nobypassrls - - NOBYPASSRLS - - createdb - - CREATEDB - - nocreatedb - - NOCREATEDB - - createrole - - CREATEROLE - - nocreaterole - - NOCREATEROLE - - inherit - - INHERIT - - noinherit - - NOINHERIT - - login - - LOGIN - - nologin - - NOLOGIN - - replication - - REPLICATION - - noreplication - - NOREPLICATION - - superuser - - SUPERUSER - - nosuperuser - - NOSUPERUSER - volume: - type: object - required: - - size - properties: - size: + pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' + clone: + type: object + required: + - cluster + properties: + cluster: + type: string + s3_endpoint: + type: string + s3_access_key_id: + type: string + s3_secret_access_key: + 
type: string + s3_force_path_style: + type: boolean + s3_wal_path: + type: string + timestamp: + type: string + pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' + # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC + # Example: 1996-12-19T16:39:57-08:00 + # Note: this field requires a timezone + uid: + format: uuid + type: string + connectionPooler: + type: object + properties: + dockerImage: + type: string + maxDBConnections: + type: integer + mode: + type: string + enum: + - "session" + - "transaction" + numberOfInstances: + type: integer + minimum: 2 + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schema: + type: string + user: + type: string + databases: + type: object + additionalProperties: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero. - storageClass: + # Note: usernames specified here as database owners must be declared in the users key of the spec key. + dockerImage: + type: string + enableConnectionPooler: + type: boolean + enableReplicaConnectionPooler: + type: boolean + enableLogicalBackup: + type: boolean + enableMasterLoadBalancer: + type: boolean + enableReplicaLoadBalancer: + type: boolean + enableShmVolume: + type: boolean + init_containers: # deprecated + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + initContainers: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + logicalBackupSchedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + maintenanceWindows: + type: array + items: type: string - subPath: + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + numberOfInstances: + type: integer + minimum: 0 + patroni: + type: object + properties: + initdb: + type: object + additionalProperties: + type: string + loop_wait: + type: integer + maximum_lag_on_failover: + type: integer + pg_hba: + type: array + items: + type: string + retry_timeout: + type: integer + slots: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + synchronous_mode: + type: boolean + synchronous_mode_strict: + type: boolean + ttl: + type: integer + podAnnotations: + type: object + additionalProperties: type: string + pod_priority_class_name: # deprecated + type: string + podPriorityClassName: + type: string + postgresql: + type: object + required: + - version + properties: + version: + type: string + enum: + - "9.3" + - "9.4" + - "9.5" + - "9.6" + - "10" + - "11" + - "12" + - "13" + parameters: + type: object + additionalProperties: + type: string + preparedDatabases: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + extensions: + type: object + 
additionalProperties: + type: string + schemas: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + defaultRoles: + type: boolean + replicaLoadBalancer: # deprecated + type: boolean + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + # Decimal natural followed by m, or decimal natural followed by + # dot followed by up to three decimal digits. + # + # This is because the Kubernetes CPU resource has millis as the + # maximum precision. The actual values are checked in code + # because the regular expression would be huge and horrible and + # not very helpful in validation error messages; this one checks + # only the format of the given number. + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + # Note: the value specified here must not be zero or be lower + # than the corresponding request. + memory: + type: string + # You can express memory as a plain integer or as a fixed-point + # integer using one of these suffixes: E, P, T, G, M, k. You can + # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero or be higher + # than the corresponding limit. + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schedulerName: + type: string + serviceAnnotations: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spiloRunAsUser: + type: integer + spiloRunAsGroup: + type: integer + spiloFSGroup: + type: integer + standby: + type: object + required: + - s3_wal_path + properties: + s3_wal_path: + type: string + teamId: + type: string + tls: + type: object + required: + - secretName + properties: + secretName: + type: string + certificateFile: + type: string + privateKeyFile: + type: string + caFile: + type: string + caSecretName: + type: string + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + required: + - weight + - preference + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + format: int32 + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + 
required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + tolerations: + type: array + items: + type: object + required: + - key + - operator + - effect + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + useLoadBalancer: # deprecated + type: boolean + users: + type: object + additionalProperties: + type: array + nullable: true + description: "Role flags specified here must not contradict each other" + items: + type: string + enum: + - bypassrls + - BYPASSRLS + - nobypassrls + - NOBYPASSRLS + - createdb + - CREATEDB + - nocreatedb + - NOCREATEDB + - createrole + - CREATEROLE + - nocreaterole + - NOCREATEROLE + - inherit + - INHERIT + - noinherit + - NOINHERIT + - login + - LOGIN + - nologin + - NOLOGIN + - replication + - REPLICATION + - noreplication + - NOREPLICATION + - superuser + - SUPERUSER + - nosuperuser + - NOSUPERUSER + volume: + type: object + required: + - size + properties: + size: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero. + storageClass: + type: string + subPath: + type: string + status: + type: object + additionalProperties: + type: string diff --git a/charts/postgres-operator/crds/postgresteams.yaml b/charts/postgres-operator/crds/postgresteams.yaml new file mode 100644 index 000000000..fbf873b84 --- /dev/null +++ b/charts/postgres-operator/crds/postgresteams.yaml @@ -0,0 +1,72 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: postgresteams.acid.zalan.do + labels: + app.kubernetes.io/name: postgres-operator + annotations: + "helm.sh/hook": crd-install +spec: + group: acid.zalan.do + names: + kind: PostgresTeam + listKind: PostgresTeamList + plural: postgresteams + singular: postgresteam + shortNames: + - pgteam + categories: + - all + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - PostgresTeam + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + properties: + additionalSuperuserTeams: + type: object + description: "Map for teamId and associated additional superuser teams" + additionalProperties: + type: array + nullable: true + description: "List of teams to become Postgres superusers" + items: + type: string + additionalTeams: + type: object + description: "Map for teamId and associated additional teams" + additionalProperties: + type: array + nullable: true + description: "List of teams whose members will also be added to the Postgres cluster" + items: + type: string + additionalMembers: + type: object + description: "Map for teamId and associated additional users" + additionalProperties: + type: array + nullable: true + description: "List of users who will also be added to the Postgres cluster" + items: + type: string diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index 63c7b450d..6b64fd705 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -2,11 +2,33 @@ apiVersion: v1 entries: postgres-operator: - apiVersion: v1 - appVersion: 1.5.0 - created: "2020-05-04T16:36:19.646719041+02:00" 
+ appVersion: 1.6.0 + created: "2020-12-17T16:16:25.639708821+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: 43510e4ed7005b2b80708df24cfbb0099b263b4a2954cff4e8f305543760be6d + digest: 2f5f527bae0a22b02f2f7b1e2352665cecf489a990e18212444fa34450b97604 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-1.6.0.tgz + version: 1.6.0 + - apiVersion: v1 + appVersion: 1.5.0 + created: "2020-12-17T16:16:25.637262877+01:00" + description: Postgres Operator creates and manages PostgreSQL clusters running + in Kubernetes + digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -23,26 +45,4 @@ entries: urls: - postgres-operator-1.5.0.tgz version: 1.5.0 - - apiVersion: v1 - appVersion: 1.4.0 - created: "2020-05-04T16:36:19.645338751+02:00" - description: Postgres Operator creates and manages PostgreSQL clusters running - in Kubernetes - digest: f8b90fecfc3cb825b94ed17edd9d5cefc36ae61801d4568597b4a79bcd73b2e9 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-1.4.0.tgz - version: 1.4.0 -generated: "2020-05-04T16:36:19.643857452+02:00" +generated: "2020-12-17T16:16:25.635647131+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.4.0.tgz b/charts/postgres-operator/postgres-operator-1.4.0.tgz deleted file mode 100644 index 88a187374..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.4.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.5.0.tgz b/charts/postgres-operator/postgres-operator-1.5.0.tgz index f575f7cfd..6e1a48ab7 100644 Binary files a/charts/postgres-operator/postgres-operator-1.5.0.tgz and b/charts/postgres-operator/postgres-operator-1.5.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.6.0.tgz b/charts/postgres-operator/postgres-operator-1.6.0.tgz new file mode 100644 index 000000000..ce19d5882 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.6.0.tgz differ diff --git a/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml index ef607ae3c..b3f9f08f5 100644 --- a/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml +++ b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml @@ -10,6 +10,27 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} rules: # Patroni needs to watch and manage endpoints +{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- else }} - apiGroups: - "" resources: @@ -23,6 +44,7 @@ rules: - patch - update - watch +{{- end }} # Patroni needs to watch pods - apiGroups: - "" diff --git a/charts/postgres-operator/templates/clusterrole.yaml 
b/charts/postgres-operator/templates/clusterrole.yaml index bd34e803e..165cce7c6 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -25,6 +25,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io @@ -35,13 +44,6 @@ rules: - get - patch - update -# to read configuration from ConfigMaps -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get # to send events to the CRs - apiGroups: - "" @@ -54,7 +56,35 @@ rules: - patch - update - watch -# to manage endpoints which are also used by Patroni +# to manage endpoints/configmaps which are also used by Patroni +{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- else }} +# to read configuration from ConfigMaps +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get - apiGroups: - "" resources: @@ -68,6 +98,7 @@ rules: - patch - update - watch +{{- end }} # to CRUD secrets for database access - apiGroups: - "" @@ -96,6 +127,10 @@ rules: - delete - get - list +{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }} + - patch + - update +{{- end }} # to read existing PVs. Creation should be done via dynamic provisioning - apiGroups: - "" @@ -104,7 +139,9 @@ rules: verbs: - get - list +{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }} - update # only for resizing AWS volumes +{{- end }} # to watch Spilo pods and do rolling updates. Creation via StatefulSet - apiGroups: - "" diff --git a/charts/postgres-operator/templates/configmap.yaml b/charts/postgres-operator/templates/configmap.yaml index 64b55e0df..87fd752b1 100644 --- a/charts/postgres-operator/templates/configmap.yaml +++ b/charts/postgres-operator/templates/configmap.yaml @@ -9,6 +9,9 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} data: + {{- if .Values.podPriorityClassName }} + pod_priority_class_name: {{ .Values.podPriorityClassName }} + {{- end }} pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} {{ toYaml .Values.configGeneral | indent 2 }} {{ toYaml .Values.configUsers | indent 2 }} diff --git a/charts/postgres-operator/templates/deployment.yaml b/charts/postgres-operator/templates/deployment.yaml index 2d8eebcb3..9841bf1bc 100644 --- a/charts/postgres-operator/templates/deployment.yaml +++ b/charts/postgres-operator/templates/deployment.yaml @@ -37,6 +37,10 @@ spec: image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: + {{- if .Values.enableJsonLogging }} + - name: ENABLE_JSON_LOGGING + value: "true" + {{- end }} {{- if eq .Values.configTarget "ConfigMap" }} - name: CONFIG_MAP_NAME value: {{ template "postgres-operator.fullname" . 
}} diff --git a/charts/postgres-operator/templates/operatorconfiguration.yaml b/charts/postgres-operator/templates/operatorconfiguration.yaml index d28d68f9c..0625e1327 100644 --- a/charts/postgres-operator/templates/operatorconfiguration.yaml +++ b/charts/postgres-operator/templates/operatorconfiguration.yaml @@ -13,6 +13,9 @@ configuration: users: {{ toYaml .Values.configUsers | indent 4 }} kubernetes: + {{- if .Values.podPriorityClassName }} + pod_priority_class_name: {{ .Values.podPriorityClassName }} + {{- end }} pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} oauth_token_secret_name: {{ template "postgres-operator.fullname" . }} {{ toYaml .Values.configKubernetes | indent 4 }} diff --git a/charts/postgres-operator/templates/postgres-pod-priority-class.yaml b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml new file mode 100644 index 000000000..7ee0f2e55 --- /dev/null +++ b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml @@ -0,0 +1,15 @@ +{{- if .Values.podPriorityClassName }} +apiVersion: scheduling.k8s.io/v1 +description: 'Use only for databases controlled by Postgres operator' +kind: PriorityClass +metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator.name" . }} + helm.sh/chart: {{ template "postgres-operator.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ .Values.podPriorityClassName }} +preemptionPolicy: PreemptLowerPriority +globalDefault: false +value: 1000000 +{{- end }} diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 4f57dd642..2ca1434e7 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.5.0 + tag: v1.6.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -21,14 +21,18 @@ configGeneral: enable_crd_validation: true # update only the statefulsets without immediately doing the rolling update enable_lazy_spilo_upgrade: false + # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION + enable_pgversion_env_var: true # start any new database pod without limitations on shm memory enable_shm_volume: true + # enables backwards compatible path between Spilo 12 and Spilo 13 images + enable_spilo_wal_path_compat: false # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s) # kubernetes_use_configmaps: false # Spilo docker image - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3 + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # max number of instances in Postgres cluster. -1 = no limit min_instances: -1 # min number of instances in Postgres cluster. 
-1 = no limit
@@ -45,7 +49,7 @@ configGeneral:
  # example: "exampleimage:exampletag"
  # number of routines the operator spawns to process requests concurrently
-  workers: 4
+  workers: 8
 # parameters describing Postgres users
 configUsers:
@@ -67,6 +71,12 @@ configKubernetes:
  #   keya: valuea
  #   keyb: valueb
+  # key name for annotation that compares manifest value with current date
+  # delete_annotation_date_key: "delete-date"
+
+  # key name for annotation that compares manifest value with cluster name
+  # delete_annotation_name_key: "delete-clustername"
+
  # list of annotations propagated from cluster manifest to statefulset and deployment
  # downscaler_annotations:
  # - deployment-time
@@ -83,7 +93,11 @@ configKubernetes:
  # namespaced name of the secret containing infrastructure roles names and passwords
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  # list of labels that can be inherited from the cluster manifest
+  # list of annotation keys that can be inherited from the cluster manifest
+  # inherited_annotations:
+  # - owned-by
+
+  # list of label keys that can be inherited from the cluster manifest
  # inherited_labels:
  # - application
  # - environment
@@ -104,6 +118,8 @@ configKubernetes:
  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  # namespaced name of the ConfigMap with environment variables to populate on every pod
  # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"
  # specify the pod management policy of stateful sets of Postgres clusters
  pod_management_policy: "ordered_ready"
@@ -119,11 +135,16 @@ configKubernetes:
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator
  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
  # group ID with write-access to volumes (required to run Spilo as non-root process)
  # spilo_fsgroup: 103
  # whether the Spilo container should run in privileged mode
  spilo_privileged: false
+  # storage resize strategy, available options are: ebs, pvc, off
+  storage_resize_mode: pvc
  # operator watches for postgres objects in the given namespace
  watched_namespace: "*"  # listen to all namespaces
@@ -170,6 +191,8 @@ configLoadBalancer:
  enable_master_load_balancer: false
  # toggles service type load balancer pointing to the replica pod of the cluster
  enable_replica_load_balancer: false
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
  # defines the DNS name string template for the master load balancer cluster
  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
  # defines the DNS name string template for the replica load balancer cluster
@@ -202,19 +225,30 @@ configAwsOrGcp:
  # AWS region used to store EBS volumes
  aws_region: eu-central-1
+  # enable automatic migration on AWS from gp2 to gp3 volumes
+  enable_ebs_gp3_migration: false
+  # defines maximum volume size in GB until which auto migration happens
+  # enable_ebs_gp3_migration_max_size: 1000
+
+  # GCP credentials that will be used by the operator / pods
+  # gcp_credentials: ""
+
  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
  # kube_iam_role: ""
  # S3 bucket to use for shipping postgres daily logs
  # log_s3_bucket: ""
+  # GCS bucket to use for shipping WAL segments with WAL-E
+  # wal_gs_bucket: ""
+
  # S3 bucket to use for shipping WAL segments with WAL-E
  # wal_s3_bucket: ""
 # configure K8s cron job managed by the operator
 configLogicalBackup:
  # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
  # S3 Access Key ID
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
@@ -235,6 +269,11 @@ configTeamsApi:
  # team_admin_role will have the rights to grant roles coming from PG manifests
  # enable_admin_role_for_users: true
+  # operator watches for PostgresTeam CRs to assign additional teams and members to clusters
+  enable_postgres_team_crd: false
+  # toggle to create additional superuser teams from PostgresTeam CRs
+  # enable_postgres_team_crd_superusers: false
+
  # toggle to grant superuser to team members created from the Teams API
  enable_team_superuser: false
  # toggles usage of the Teams API by the operator
@@ -265,7 +304,7 @@ configConnectionPooler:
  # db user for pooler to use
  connection_pooler_user: "pooler"
  # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
  # max db connections the pooler should hold
  connection_pooler_max_db_connections: 60
  # default pooling mode
@@ -299,8 +338,12 @@ podServiceAccount:
  # If not set a name is generated using the fullname template and "-pod" suffix
  name: "postgres-pod"
+# priority class for operator pod
 priorityClassName: ""
+
+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
  limits:
    cpu: 500m
diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml
index 2a6a181f5..3a84fae59 100644
--- a/charts/postgres-operator/values.yaml
+++ b/charts/postgres-operator/values.yaml
@@ -1,7 +1,7 @@ image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator
-  tag: v1.5.0
+  tag: v1.6.0
  pullPolicy: "IfNotPresent"
 # Optionally specify an array of imagePullSecrets.
@@ -15,20 +15,27 @@ podLabels: {}
 configTarget: "ConfigMap"
+# JSON logging format
+enableJsonLogging: false
+
 # general configuration parameters
 configGeneral:
  # choose if deployment creates/updates CRDs with OpenAPIV3Validation
  enable_crd_validation: "true"
  # update only the statefulsets without immediately doing the rolling update
  enable_lazy_spilo_upgrade: "false"
+  # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
+  enable_pgversion_env_var: "true"
  # start any new database pod without limitations on shm memory
  enable_shm_volume: "true"
+  # enables backwards compatible path between Spilo 12 and Spilo 13 images
+  enable_spilo_wal_path_compat: "false"
  # etcd connection string for Patroni. Empty uses K8s-native DCS.
  etcd_host: ""
  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
  # kubernetes_use_configmaps: "false"
  # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
  # max number of instances in Postgres cluster. -1 = no limit
  min_instances: "-1"
  # min number of instances in Postgres cluster.
-1 = no limit
@@ -44,7 +51,7 @@ configGeneral:
  # sidecar_docker_images: ""
  # number of routines the operator spawns to process requests concurrently
-  workers: "4"
+  workers: "8"
 # parameters describing Postgres users
 configUsers:
@@ -63,6 +70,12 @@ configKubernetes:
  # annotations attached to each database pod
  # custom_pod_annotations: "keya:valuea,keyb:valueb"
+  # key name for annotation that compares manifest value with current date
+  # delete_annotation_date_key: "delete-date"
+
+  # key name for annotation that compares manifest value with cluster name
+  # delete_annotation_name_key: "delete-clustername"
+
  # list of annotations propagated from cluster manifest to statefulset and deployment
  # downscaler_annotations: "deployment-time,downscaler/*"
@@ -77,7 +90,10 @@ configKubernetes:
  # namespaced name of the secret containing infrastructure roles names and passwords
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  # list of labels that can be inherited from the cluster manifest
+  # list of annotation keys that can be inherited from the cluster manifest
+  # inherited_annotations: owned-by
+
+  # list of label keys that can be inherited from the cluster manifest
  # inherited_labels: application,environment
  # timeout for successful migration of master pods from unschedulable node
@@ -95,6 +111,8 @@ configKubernetes:
  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  # namespaced name of the ConfigMap with environment variables to populate on every pod
  # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"
  # specify the pod management policy of stateful sets of Postgres clusters
  pod_management_policy: "ordered_ready"
@@ -110,11 +128,16 @@ configKubernetes:
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator
  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
  # group ID with write-access to volumes (required to run Spilo as non-root process)
  # spilo_fsgroup: "103"
  # whether the Spilo container should run in privileged mode
  spilo_privileged: "false"
+  # storage resize strategy, available options are: ebs, pvc, off
+  storage_resize_mode: pvc
  # operator watches for postgres objects in the given namespace
  watched_namespace: "*"  # listen to all namespaces
@@ -159,6 +182,8 @@ configLoadBalancer:
  enable_master_load_balancer: "false"
  # toggles service type load balancer pointing to the replica pod of the cluster
  enable_replica_load_balancer: "false"
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
  # defines the DNS name string template for the master load balancer cluster
  master_dns_name_format: '{cluster}.{team}.{hostedzone}'
  # defines the DNS name string template for the replica load balancer cluster
@@ -191,6 +216,14 @@ configAwsOrGcp:
  # AWS region used to store EBS volumes
  aws_region: eu-central-1
+  # enable automatic migration on AWS from gp2 to gp3 volumes
+  enable_ebs_gp3_migration: "false"
+  # defines maximum volume size in GB until which auto migration happens
+  # enable_ebs_gp3_migration_max_size: "1000"
+
+  # GCP credentials for setting the GOOGLE_APPLICATION_CREDENTIALS environment variable
+  # gcp_credentials: ""
+
  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
  # kube_iam_role: ""
@@ -200,10 +233,13 @@ configAwsOrGcp:
  # S3 bucket to use for shipping WAL segments with WAL-E
  # wal_s3_bucket: ""
+  # GCS bucket to use for shipping WAL segments with WAL-E
+  # wal_gs_bucket: ""
+
 # configure K8s cron job managed by the operator
 configLogicalBackup:
  # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
  # S3 Access Key ID
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
@@ -224,6 +260,11 @@ configTeamsApi:
  # team_admin_role will have the rights to grant roles coming from PG manifests
  # enable_admin_role_for_users: "true"
+  # operator watches for PostgresTeam CRs to assign additional teams and members to clusters
+  enable_postgres_team_crd: "false"
+  # toggle to create additional superuser teams from PostgresTeam CRs
+  # enable_postgres_team_crd_superusers: "false"
+
  # toggle to grant superuser to team members created from the Teams API
  # enable_team_superuser: "false"
@@ -257,7 +298,7 @@ configConnectionPooler:
  # db user for pooler to use
  connection_pooler_user: "pooler"
  # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
  # max db connections the pooler should hold
  connection_pooler_max_db_connections: "60"
  # default pooling mode
@@ -291,8 +332,12 @@ podServiceAccount:
  # If not set a name is generated using the fullname template and "-pod" suffix
  name: "postgres-pod"
+# priority class for operator pod
 priorityClassName: ""
+
+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
  limits:
    cpu: 500m
diff --git a/cmd/main.go b/cmd/main.go
index a178c187e..376df0bad 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -2,7 +2,7 @@ package main
 import (
 	"flag"
-	"log"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"os/signal"
 	"sync"
@@ -36,6 +38,8 @@ func init() {
 	flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
 	flag.Parse()
+	config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
+
 	configMapRawName := os.Getenv("CONFIG_MAP_NAME")
 	if configMapRawName != "" {
@@ -63,6 +65,9 @@ func main() {
 	var err error
+	if config.EnableJsonLogging {
+		log.SetFormatter(&log.JSONFormatter{})
+	}
 	log.SetOutput(os.Stdout)
 	log.Printf("Spilo operator %s\n", version)
diff --git a/delivery.yaml b/delivery.yaml
index d0884f982..d0a689b8b 100644
--- a/delivery.yaml
+++ b/delivery.yaml
@@ -2,6 +2,10 @@ version: "2017-09-20"
 pipeline:
 - id: build-postgres-operator
   type: script
+  vm: large
+  cache:
+    paths:
+    - /go/pkg/mod
   commands:
   - desc: 'Update'
     cmd: |
@@ -12,7 +16,7 @@ pipeline:
   - desc: 'Install go'
     cmd: |
       cd /tmp
-      wget -q https://storage.googleapis.com/golang/go1.14.linux-amd64.tar.gz -O go.tar.gz
+      wget -q https://storage.googleapis.com/golang/go1.15.6.linux-amd64.tar.gz -O go.tar.gz
       tar -xf go.tar.gz
       mv go /usr/local
       ln -s /usr/local/go/bin/go /usr/bin/go
@@ -28,7 +32,7 @@ pipeline:
         IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test
       fi
       export IMAGE
-      make deps docker
+      make deps mocks docker
   - desc: 'Run unit tests'
     cmd: |
       export PATH=$PATH:$HOME/go/bin
@@ -76,3 +80,15 @@ pipeline:
       export IMAGE
       make docker
       make push
+
+  - id: build-logical-backup
+    type: script
+
+    commands:
+    - desc: Build
image + cmd: | + cd docker/logical-backup + export TAG=$(git describe --tags --always --dirty) + IMAGE="registry-write.opensource.zalan.do/acid/logical-backup" + docker build --rm -t "$IMAGE:$TAG$CDP_TAG" . + docker push "$IMAGE:$TAG$CDP_TAG" diff --git a/docker/Dockerfile b/docker/Dockerfile index 520fd2d07..4c47eb346 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,6 +2,7 @@ FROM alpine MAINTAINER Team ACID @ Zalando # We need root certificates to deal with teams api over https +RUN apk --no-cache add curl RUN apk --no-cache add ca-certificates COPY build/* / diff --git a/docker/logical-backup/Dockerfile b/docker/logical-backup/Dockerfile index 94c524381..c8bbe6f28 100644 --- a/docker/logical-backup/Dockerfile +++ b/docker/logical-backup/Dockerfile @@ -13,12 +13,16 @@ RUN apt-get update \ curl \ jq \ gnupg \ + gcc \ + libffi-dev \ && pip3 install --no-cache-dir awscli --upgrade \ + && pip3 install --no-cache-dir gsutil --upgrade \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ && cat /etc/apt/sources.list.d/pgdg.list \ && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get update \ && apt-get install --no-install-recommends -y \ + postgresql-client-13 \ postgresql-client-12 \ postgresql-client-11 \ postgresql-client-10 \ diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 2d9a39e02..50f7e6e4c 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -46,6 +46,23 @@ function aws_upload { aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" } +function gcs_upload { + PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + + gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP" +} + +function upload { + case $LOGICAL_BACKUP_PROVIDER in + "gcs") + gcs_upload + ;; + *) + aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) + ;; + esac +} + function get_pods { declare -r SELECTOR="$1" @@ -93,7 +110,7 @@ for search in "${search_strategy[@]}"; do done set -x -dump | compress | aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) +dump | compress | upload [[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 )) set +x diff --git a/docs/administrator.md b/docs/administrator.md index 45a328d38..396629b1a 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -11,17 +11,29 @@ switchover (planned failover) of the master to the Pod with new minor version. The switch should usually take less than 5 seconds, still clients have to reconnect. -Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster). -The new cluster manifest must have a higher `version` string than the source -cluster and will be created from a basebackup. Depending of the cluster size, -downtime in this case can be significant as writes to the database should be -stopped and all WAL files should be archived first before cloning is started. +Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster) +or in-place. -Note, that simply changing the version string in the `postgresql` manifest does -not work at present and leads to errors. Neither Patroni nor Postgres Operator -can do in place `pg_upgrade`. 
Still, it can be executed manually in the Postgres
-container, which is tricky (i.e. systems need to be stopped, replicas have to be
-synced) but of course faster than cloning.
+With cloning, the new cluster manifest must have a higher `version` string than
+the source cluster and will be created from a basebackup. Depending on the
+cluster size, downtime in this case can be significant as writes to the database
+should be stopped and all WAL files should be archived first before cloning is
+started.
+
+Starting with Spilo 13, Postgres Operator can do in-place major version upgrades,
+which should be faster than cloning. However, it is not fully automatic yet.
+First, you need to make sure that setting the `PGVERSION` environment variable
+is enabled in the configuration. Since `v1.6.0`, `enable_pgversion_env_var` is
+enabled by default.
+
+To trigger the upgrade, increase the version in the cluster manifest. After
+pods are rotated, `configure_spilo` will notice the version mismatch and start
+the old version again. You can then exec into the Postgres container of the
+master instance and call `python3 /scripts/inplace_upgrade.py N` where `N`
+is the number of members of your cluster (see `number_of_instances`). The
+upgrade is usually fast, well under one minute for most DBs. Note that changes
+become irreversible once `pg_upgrade` is called. To understand the upgrade
+procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
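+
+As a minimal sketch (the pod name `acid-minimal-cluster-0` and the member
+count `3` below are placeholders for your own cluster), the manual step could
+look like this:
+
+```bash
+# run the in-place upgrade script as the postgres user inside the master pod;
+# the argument is the number of cluster members (see number_of_instances)
+kubectl exec -it acid-minimal-cluster-0 -- su postgres -c "python3 /scripts/inplace_upgrade.py 3"
+```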
## CRD Validation
@@ -44,7 +56,7 @@
Once the validation is enabled it can only be disabled manually by editing or
patching the CRD manifest:

```bash
-zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
+kubectl patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
```

## Non-default cluster domain
@@ -123,6 +135,68 @@
Every other Postgres cluster which lacks the annotation will be ignored by this
operator. Conversely, operators without a defined `CONTROLLER_ID` will ignore
clusters with defined ownership of another operator.

+## Delete protection via annotations
+
+To avoid accidental deletion of Postgres clusters, the operator can check the
+manifest for two existing annotations containing the cluster name and/or the
+current date (in YYYY-MM-DD format). The names of the annotation keys can be
+defined in the configuration. By default, they are not set, which disables the
+delete protection. Thus, one could choose to only go with one annotation.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  delete_annotation_date_key: "delete-date"
+  delete_annotation_name_key: "delete-clustername"
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    delete_annotation_date_key: "delete-date"
+    delete_annotation_name_key: "delete-clustername"
+```
+
+Now, every cluster manifest must contain the configured annotation keys to
+trigger the delete process when running `kubectl delete pg`. Note that the
+`Postgresql` resource would still get deleted, as K8s' API server does not
+block it. Only the operator logs will tell that the delete criteria were not
+met.
+
+**cluster manifest**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: demo-cluster
+  annotations:
+    delete-date: "2020-08-31"
+    delete-clustername: "demo-cluster"
+spec:
+  ...
+```
+
+In case the resource has been deleted accidentally or the annotations were
+simply forgotten, it's safe to recreate the cluster with `kubectl create`.
+Existing Postgres clusters are not replaced by the operator. But as the
+original cluster still exists, the status will show `CreateFailed` at first.
+On the next sync event it should change to `Running`. However, as it is in
+fact a new resource for K8s, the UID will differ, which can trigger a rolling
+update of the pods because the UID is used as part of the backup path to S3.
+
+
## Role-based access control for the operator

The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
@@ -319,11 +393,18 @@ spec:

## Custom Pod Environment Variables

-
-It is possible to configure a ConfigMap which is used by the Postgres pods as
+It is possible to configure a ConfigMap as well as a Secret which are used by the Postgres pods as
an additional provider for environment variables. One use case is to customize
-the Spilo image and configure it with environment variables. The ConfigMap with
-the additional settings is referenced in the operator's main configuration.
+the Spilo image and configure it with environment variables. Another case could be to provide custom
+cloud provider or backup settings.
+
+In general, the operator gives preference to the globally configured variables so that the custom
+ones do not interfere with core functionality. Variables with the 'WAL_' and 'LOG_' prefixes can be
+overwritten, though, to allow backup and log shipping to be configured differently.
+
+
+### Via ConfigMap
+The ConfigMap with the additional settings is referenced in the operator's main configuration.
A namespace can be specified along with the name. If left out, the configured
default namespace of your K8s client will be used and if the ConfigMap is not
found there, the Postgres cluster's namespace is taken when different:
@@ -365,7 +446,54 @@ data:
  MY_CUSTOM_VAR: value
```

-This ConfigMap is then added as a source of environment variables to the
+The key-value pairs of the ConfigMap are then added as environment variables to the
+Postgres StatefulSet/pods.
+
+
+### Via Secret
+The Secret with the additional variables is referenced in the operator's main configuration.
+To protect the values of the secret from being exposed in the pod spec they are each referenced
+as SecretKeyRef.
+This does not allow the Secret to be in a different namespace than the pods, though.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  # referencing secret with custom environment variables
+  pod_environment_secret: postgres-pod-secrets
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    # referencing secret with custom environment variables
+    pod_environment_secret: postgres-pod-secrets
+```
+
+**referenced Secret `postgres-pod-secrets`**
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres-pod-secrets
+  namespace: default
+data:
+  MY_CUSTOM_VAR: dmFsdWU=
+```
+
+The key-value pairs of the Secret are all accessible as environment variables to the
+Postgres StatefulSet/pods.
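+
+As a sketch, the referenced Secret above could also be created directly with
+`kubectl` (the name, namespace and variable are simply the values from this
+example):
+
+```bash
+# creates the postgres-pod-secrets Secret with one custom variable;
+# kubectl base64-encodes the literal value for you
+kubectl create secret generic postgres-pod-secrets \
+  --from-literal=MY_CUSTOM_VAR=value \
+  --namespace default
+```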
## Limiting the number of min and max instances in clusters
@@ -445,9 +573,12 @@ database.

* **Human users** originate from the [Teams API](user.md#teams-api-roles) that
returns a list of the team members given a team id. The operator differentiates
between (a) product teams that own a particular Postgres cluster and are granted
-admin rights to maintain it, and (b) Postgres superuser teams that get the
-superuser access to all Postgres databases running in a K8s cluster for the
-purposes of maintaining and troubleshooting.
+admin rights to maintain it, (b) Postgres superuser teams that get superuser
+access to all Postgres databases running in a K8s cluster for the purposes of
+maintaining and troubleshooting, and (c) additional teams, superuser teams or
+members associated with the owning team. The latter is managed via the
+[PostgresTeam CRD](user.md#additional-teams-and-members-per-cluster).
+

## Understanding rolling update of Spilo pods
@@ -518,6 +649,83 @@
A secret can be pre-provisioned in different ways:

* Automatically provisioned via a custom K8s controller like
  [kube-aws-iam-controller](https://github.com/mikkeloscar/kube-aws-iam-controller)

+## Google Cloud Platform setup
+
+To configure the operator on GCP, a few prerequisites are needed:
+
+* A service account with the proper IAM setup to access the GCS bucket for the WAL-E logs
+* The credentials file for the service account
+
+The configuration parameters that we will be using are:
+
+* `additional_secret_mount`
+* `additional_secret_mount_path`
+* `gcp_credentials`
+* `wal_gs_bucket`
+
+### Generate a K8s secret resource
+
+Generate the K8s secret resource that will contain your service account's
+credentials. It's highly recommended to use a service account and limit its
+scope to just the WAL-E bucket.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: psql-wale-creds
+  namespace: default
+type: Opaque
+stringData:
+  key.json: |-
+
+```
+
+### Setup your operator configuration values
+
+With the `psql-wale-creds` resource applied to your cluster, ensure that
+the operator's configuration is set up like the following:
+
+```yml
+...
+aws_or_gcp:
+  additional_secret_mount: "psql-wale-creds"
+  additional_secret_mount_path: "/var/secrets/google"  # or wherever you want to mount the file
+  # aws_region: eu-central-1
+  # kube_iam_role: ""
+  # log_s3_bucket: ""
+  # wal_s3_bucket: ""
+  wal_gs_bucket: "postgres-backups-bucket-28302F2"  # name of the bucket where the WAL-E logs are saved
+  gcp_credentials: "/var/secrets/google/key.json"  # combination of the mount path & the key in the K8s resource (i.e. key.json)
+...
+```
+
+### Setup pod environment configmap
+
+To make the postgres-operator work with GCS, use the following ConfigMap:
+
+```yml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: pod-env-overrides
+  namespace: postgres-operator-system
+data:
+  # Any env variable used by spilo can be added
+  USE_WALG_BACKUP: "true"
+  USE_WALG_RESTORE: "true"
+  CLONE_USE_WALG_RESTORE: "true"
+```
+
+This ConfigMap instructs the operator to use WAL-G instead of WAL-E for backup and restore.
+
+Then provide this ConfigMap in the postgres-operator settings:
+
+```yml
+...
+# namespaced name of the ConfigMap with environment variables to populate on every pod
+pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
+...
+```
+
+
## Sidecars for Postgres clusters

A list of sidecars is added to each cluster created by the operator.
The default diff --git a/docs/developer.md b/docs/developer.md index 6e0fc33c8..3316ac4cc 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -237,9 +237,11 @@ kubectl logs acid-minimal-cluster-0 ## End-to-end tests -The operator provides reference end-to-end tests (e2e) (as Docker image) to -ensure various infrastructure parts work smoothly together. Each e2e execution -tests a Postgres Operator image built from the current git branch. The test +The operator provides reference end-to-end (e2e) tests to +ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`. +The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build. + +Each e2e execution tests a Postgres Operator image built from the current git branch. The test runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/), utilizes provided manifest examples, and runs e2e tests contained in the `tests` folder. The K8s API client in the container connects to the `kind` cluster via @@ -284,7 +286,7 @@ manifest files: Postgres manifest parameters are defined in the [api package](../pkg/apis/acid.zalan.do/v1/postgresql_type.go). The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go). -Validation of CRD parameters is controlled in [crd.go](../pkg/apis/acid.zalan.do/v1/crds.go). +Validation of CRD parameters is controlled in [crds.go](../pkg/apis/acid.zalan.do/v1/crds.go). Please, reflect your changes in tests, for example in: * [config_test.go](../pkg/util/config/config_test.go) * [k8sres_test.go](../pkg/cluster/k8sres_test.go) diff --git a/docs/diagrams/neutral_operator.excalidraw b/docs/diagrams/neutral_operator.excalidraw new file mode 100644 index 000000000..f9e48aec1 --- /dev/null +++ b/docs/diagrams/neutral_operator.excalidraw @@ -0,0 +1,3499 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "HJnnb_r0hPUKoBEINbers", + "type": "ellipse", + "x": 273, + "y": 517.75, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 10898020, + "version": 258, + "versionNonce": 298931548, + "isDeleted": false + }, + { + "id": "tCDf1dMVyFkty_0jKAnZs", + "type": "line", + "x": 273, + "y": 531.75, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1834520924, + "version": 237, + "versionNonce": 1077299676, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "nA3ZdlWP2zjjNACKfYs-d", + "type": "line", + "x": 395, + "y": 532.75, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1434407004, + "version": 289, + "versionNonce": 789098076, + "isDeleted": false, 
+ "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "vtgct6qIZTm4sYOD92wKg", + "type": "ellipse", + "x": 274, + "y": 602.75, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2141653860, + "version": 264, + "versionNonce": 1327137500, + "isDeleted": false + }, + { + "id": "mOLA3EYJz1RciiXTcNzKd", + "type": "text", + "x": 305, + "y": 654.25, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1191437788, + "version": 171, + "versionNonce": 640281700, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "LKNTYzb6pb0XqqRf2YNv9", + "type": "ellipse", + "x": 539, + "y": 523.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 223989476, + "version": 308, + "versionNonce": 453278684, + "isDeleted": false + }, + { + "id": "75R3P1ZFskWD8-1ssBxzK", + "type": "line", + "x": 539, + "y": 537.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1763311964, + "version": 287, + "versionNonce": 1651949540, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "RT5N8ktBKNNZFEGC5HrUk", + "type": "line", + "x": 663.5, + "y": 538.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1317425764, + "version": 340, + "versionNonce": 966842852, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "dfEllTQv2I7GjGNlxLtO7", + "type": "ellipse", + "x": 540, + "y": 608.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 25785820, + "version": 314, + "versionNonce": 886907748, + "isDeleted": false + }, + { + "id": "jsYpTmNMxbY44mytnrs1Q", + "type": "ellipse", + "x": 735, + "y": 521.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 197655268, + "version": 290, + "versionNonce": 1837380828, + "isDeleted": false + }, + { + "id": "D5XP-OpR0GnxMkHaVvFR2", + "type": "line", + "x": 735, + "y": 535.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1895077212, + 
"version": 269, + "versionNonce": 1135285988, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "GSsk0CDMtzw5RPe6jYwQG", + "type": "line", + "x": 857, + "y": 536.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 911146596, + "version": 325, + "versionNonce": 1902197084, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "MYgWwh6xIpAnWKGvUPxaR", + "type": "ellipse", + "x": 736, + "y": 606.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1492224476, + "version": 296, + "versionNonce": 997104228, + "isDeleted": false + }, + { + "id": "Mgil_EoL7vCEANAAQbeT9", + "type": "text", + "x": 220, + "y": 686.25, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 573888220, + "version": 166, + "versionNonce": 1814670812, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "yeRW34kgnJTZIZlScfwrn", + "type": "text", + "x": 523.5, + "y": 689.25, + "width": 165, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1364782300, + "version": 245, + "versionNonce": 116764132, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "dyn57jMr5lf-PlaHz_aQC", + "type": "text", + "x": 579, + "y": 667.25, + "width": 44, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1855903580, + "version": 176, + "versionNonce": 1324323420, + "isDeleted": false, + "text": "pod-1", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "H3QMgY9OZFFTnObVeqsin", + "type": "text", + "x": 775, + "y": 659.25, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1891081700, + "version": 214, + "versionNonce": 122573156, + "isDeleted": false, + "text": "pod-2", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "xTZVls7LlMTme9sH-DYxP", + "type": "text", + "x": 720.5, + "y": 691.25, + "width": 165, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1213607260, + "version": 314, + "versionNonce": 411363036, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, 
+ "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "02J48ELeakCl3ignRYIBB", + "type": "draw", + "x": 994, + "y": 889.75, + "width": 456, + "height": 202, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 83904484, + "version": 269, + "versionNonce": 1271301348, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 2, + -10 + ], + [ + 0, + -13 + ], + [ + 0, + -15 + ], + [ + -6, + -21 + ], + [ + -19, + -27 + ], + [ + -32, + -27 + ], + [ + -47, + -14 + ], + [ + -54, + -13 + ], + [ + -59, + -19 + ], + [ + -81, + -32 + ], + [ + -98, + -36 + ], + [ + -128, + -38 + ], + [ + -142, + -35 + ], + [ + -152, + -31 + ], + [ + -168, + -21 + ], + [ + -183, + -4 + ], + [ + -184, + -4 + ], + [ + -195, + -19 + ], + [ + -200, + -23 + ], + [ + -207, + -26 + ], + [ + -219, + -27 + ], + [ + -283, + -2 + ], + [ + -296, + 8 + ], + [ + -299, + 12 + ], + [ + -300, + 26 + ], + [ + -299, + 28 + ], + [ + -299, + 25 + ], + [ + -312, + 25 + ], + [ + -323, + 27 + ], + [ + -335, + 32 + ], + [ + -343, + 38 + ], + [ + -361, + 65 + ], + [ + -368, + 80 + ], + [ + -371, + 97 + ], + [ + -369, + 105 + ], + [ + -366, + 110 + ], + [ + -352, + 118 + ], + [ + -344, + 119 + ], + [ + -336, + 118 + ], + [ + -316, + 109 + ], + [ + -310, + 104 + ], + [ + -309, + 101 + ], + [ + -308, + 115 + ], + [ + -305, + 130 + ], + [ + -296, + 144 + ], + [ + -282, + 159 + ], + [ + -274, + 163 + ], + [ + -262, + 164 + ], + [ + -240, + 163 + ], + [ + -210, + 153 + ], + [ + -173, + 139 + ], + [ + -137, + 118 + ], + [ + -134, + 115 + ], + [ + -129, + 121 + ], + [ + -114, + 144 + ], + [ + -98, + 154 + ], + [ + -86, + 157 + ], + [ + -61, + 157 + ], + [ + -36, + 153 + ], + [ + -16, + 147 + ], + [ + -8, + 143 + ], + [ + -6, + 136 + ], + [ + -5, + 112 + ], + [ + -6, + 106 + ], + [ + 3, + 119 + ], + [ + 8, + 122 + ], + [ + 17, + 124 + ], + [ + 26, + 123 + ], + [ + 57, + 111 + ], + [ + 74, + 100 + ], + [ + 80, + 92 + ], + [ + 83, + 77 + ], + [ + 76, + 57 + ], + [ + 69, + 43 + ], + [ + 83, + 31 + ], + [ + 85, + 23 + ], + [ + 73, + 3 + ], + [ + 67, + -5 + ], + [ + 47, + -15 + ], + [ + 25, + -12 + ], + [ + 0, + 0 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "VZp483EWEr7EJ_FlHJk0a", + "type": "text", + "x": 665.75, + "y": 931.5, + "width": 386, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 224990820, + "version": 195, + "versionNonce": 1620426212, + "isDeleted": false, + "text": "External Storage: S3, GCS", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "BrLh-5pM2Jhx-5_Vep5-G", + "type": "arrow", + "x": 393, + "y": 729.75, + "width": 262, + "height": 163, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 310475876, + "version": 138, + "versionNonce": 1240221796, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 262, + 163 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "UfPfV9MFcDCPYI-jf7seb", + "type": "text", + "x": 381.5, + "y": 814.75, + "width": 190, + "height": 25, + "angle": 0.45141580316417595, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + 
"fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [ + "3u9rzicVh0QO2xN6fmhVp" + ], + "seed": 1292594020, + "version": 326, + "versionNonce": 1344659420, + "isDeleted": false, + "text": "Nightly Basebackup", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "SmcG2YbgL-8v4clAUp0xh", + "type": "text", + "x": 346.75, + "y": 846.25, + "width": 225, + "height": 25, + "angle": 0.4312915734727083, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [ + "3u9rzicVh0QO2xN6fmhVp" + ], + "seed": 1782203356, + "version": 325, + "versionNonce": 361602020, + "isDeleted": false, + "text": "Write Ahead Log (WAL)", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "_Kyp73xUT4mahN8JdoGcS", + "type": "diamond", + "x": 277, + "y": 412.75, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 270461276, + "version": 139, + "versionNonce": 144016476, + "isDeleted": false + }, + { + "id": "Bmee_A3CCXFMs_Jo4fDwh", + "type": "diamond", + "x": 638, + "y": 404.75, + "width": 127, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1791880796, + "version": 172, + "versionNonce": 414744420, + "isDeleted": false + }, + { + "id": "AwIhDdIZFowEQWpatIS_J", + "type": "text", + "x": 265, + "y": 376.25, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 446241244, + "version": 143, + "versionNonce": 1285331164, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "zYWhLoCY5QQJkDhjKtZMM", + "type": "text", + "x": 623.5, + "y": 374.25, + "width": 149, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1243730268, + "version": 137, + "versionNonce": 1270543076, + "isDeleted": false, + "text": "Replica Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "IPdSpq4-kBLUQkvNDjyXi", + "type": "arrow", + "x": 334.5, + "y": 452.25, + "width": 4.965440128473574, + "height": 57.669431607425224, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1700066276, + "version": 127, + "versionNonce": 935235548, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -4.965440128473574, + 57.669431607425224 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "05IcxTgNq-2qmCLEERhJz", + "type": "arrow", + "x": 698, + "y": 448.75, + "width": 93, + "height": 59, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": 
"hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1235196764, + "version": 112, + "versionNonce": 1831007844, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -93, + 59 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "OzTNSlxHW3EHNyqPewfNh", + "type": "arrow", + "x": 705, + "y": 444.75, + "width": 90, + "height": 70, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1509319268, + "version": 124, + "versionNonce": 330562916, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 90, + 70 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "0ocAfgcF_lOPK9OGRgqVZ", + "type": "text", + "x": 947.75, + "y": 215, + "width": 300, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1404120284, + "version": 182, + "versionNonce": 1435274980, + "isDeleted": false, + "text": "K8s account/network boundary", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "MY4EMfFbYicpuS02Xsrz5", + "type": "rectangle", + "x": 306, + "y": 206.75, + "width": 91, + "height": 41, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1814514020, + "version": 128, + "versionNonce": 614182244, + "isDeleted": false + }, + { + "id": "ymXiakDGTbDBDdbumOMAM", + "type": "text", + "x": 276.5, + "y": 172.25, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 111950308, + "version": 140, + "versionNonce": 108395612, + "isDeleted": false, + "text": "Load Balancer", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "KbmHbQP5KiwSuGmdHvMPG", + "type": "rectangle", + "x": 655.25, + "y": 206.5, + "width": 91, + "height": 41, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 571247452, + "version": 180, + "versionNonce": 252207332, + "isDeleted": false + }, + { + "id": "EpO40F5rsuuBJFSu77u5n", + "type": "text", + "x": 625.75, + "y": 172, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 995715172, + "version": 192, + "versionNonce": 438990044, + "isDeleted": false, + "text": "Load Balancer", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "JI02M9qfU4tMF5xh8ZxUg", + "type": "line", + "x": 408, + "y": 224.75, + "width": 238, + "height": 2, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1266200156, + "version": 141, + "versionNonce": 9610340, 
+ "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 238, + 2 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "zTkeS6HMU9Ne-W4IQppVG", + "type": "line", + "x": 756, + "y": 225.75, + "width": 177, + "height": 0, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2023140068, + "version": 117, + "versionNonce": 44254172, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 177, + 0 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "UqDNg7adJeN1tJ7o6cuMM", + "type": "line", + "x": 299, + "y": 223.75, + "width": 172, + "height": 4, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1114792676, + "version": 124, + "versionNonce": 1594390500, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -172, + 4 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "zsE606bqp5qqQi7xyFI6R", + "type": "arrow", + "x": 343, + "y": 254.75, + "width": 4, + "height": 111, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 650104412, + "version": 137, + "versionNonce": 1304848476, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -4, + 111 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "yVLFLudgVoRUdAxFNJC_z", + "type": "arrow", + "x": 698, + "y": 255.75, + "width": 0.8342433616519429, + "height": 119.78342345680107, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 954257116, + "version": 131, + "versionNonce": 645389156, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0.8342433616519429, + 119.78342345680107 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "lM8YhhEb6z3bDEJuu4m3d", + "type": "rectangle", + "x": 1058, + "y": 285.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 837465444, + "version": 206, + "versionNonce": 34667740, + "isDeleted": false + }, + { + "id": "6vuRc6aOqhdoEuttAeKt2", + "type": "rectangle", + "x": 1044.75, + "y": 270.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1395971300, + "version": 187, + "versionNonce": 556715748, + "isDeleted": false + }, + { + "id": "-fAixshWpoLjWfYDN2XQn", + "type": "rectangle", + "x": 1074, + "y": 299.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 684139868, + "version": 244, + "versionNonce": 1762984284, + "isDeleted": false + }, + { + "id": "9ly5PAEyfbUB3QeBMxvhA", + "type": "arrow", + "x": 1034, + "y": 339.75, + "width": 249, + "height": 72, + "angle": 0, + "strokeColor": 
"#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 27470428, + "version": 132, + "versionNonce": 532370020, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -249, + 72 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "YhujSfYUqGuzRLIsXy9jA", + "type": "arrow", + "x": 1038, + "y": 287.75, + "width": 613, + "height": 95, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 726845916, + "version": 227, + "versionNonce": 1243903452, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -613, + 95 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "nGE3aIbk_78-jtgJnzjfR", + "type": "text", + "x": 773, + "y": 281.25, + "width": 138, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 43455324, + "version": 126, + "versionNonce": 820733412, + "isDeleted": false, + "text": "read &writes ", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "pMb4NsyES3HkiEmKW13wZ", + "type": "text", + "x": 910.5, + "y": 375.25, + "width": 89, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1546616164, + "version": 115, + "versionNonce": 844192348, + "isDeleted": false, + "text": "read only", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "CplzIegzDmsR0AWIsy2Nk", + "type": "ellipse", + "x": 1356.625, + "y": 684.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#868e96", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 617074276, + "version": 371, + "versionNonce": 1506609508, + "isDeleted": false + }, + { + "id": "FlifalHMUV7XU10a9nWsU", + "type": "line", + "x": 1356.625, + "y": 698.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1194179036, + "version": 335, + "versionNonce": 1013821148, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "ZML-phKfbEV-wWs8DNoCS", + "type": "line", + "x": 1478.625, + "y": 699.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1460167140, + "version": 387, + "versionNonce": 1303264484, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "sxeHgJbMHMUfe7pY2nz37", + "type": "ellipse", + "x": 1357.625, + "y": 769.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#868e96", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": 
"solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1167203932, + "version": 377, + "versionNonce": 1493922652, + "isDeleted": false + }, + { + "id": "XsWj_GN-Vna0UzrJZdvtD", + "type": "text", + "x": 1236.875, + "y": 475, + "width": 404, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 83159388, + "version": 129, + "versionNonce": 899524964, + "isDeleted": false, + "text": "Clone from External Storage", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "SR_Mx08J0VFGQwsyXJVBl", + "type": "diamond", + "x": 1365.375, + "y": 579.5, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1095611996, + "version": 189, + "versionNonce": 377268188, + "isDeleted": false + }, + { + "id": "11TagJjN0nNn4FtwrFBke", + "type": "text", + "x": 1353.375, + "y": 543, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 206859620, + "version": 186, + "versionNonce": 451367908, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "-JJEKxE4FMwgRCKN8xPNm", + "type": "arrow", + "x": 1425.375, + "y": 621.5, + "width": 5, + "height": 48, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1669237468, + "version": 158, + "versionNonce": 1758678108, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -5, + 48 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "N5Rd_bvTstfjJ4_Fudzbh", + "type": "arrow", + "x": 1096.375, + "y": 888.75, + "width": 237.5, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1399054820, + "version": 73, + "versionNonce": 1910595804, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 237.5, + -90 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "v3KmUjaQRCHwnpY1QivMD", + "type": "text", + "x": 1085.125, + "y": 813.75, + "width": 205, + "height": 25, + "angle": 5.92323678115218, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1552100828, + "version": 184, + "versionNonce": 537307876, + "isDeleted": false, + "text": "Restore point in time", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "bp0xDgg7BJ_M54RKiISdr", + "type": "text", + "x": 1198.875, + "y": 290, + "width": 210, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 217533284, + "version": 62, + "versionNonce": 
1228465500, + "isDeleted": false, + "text": "Your Application", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "6spCo6ScZkWngCoTAcCiW", + "type": "ellipse", + "x": 947.625, + "y": 1154.5, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 905272292, + "version": 401, + "versionNonce": 1143724508, + "isDeleted": false + }, + { + "id": "F6nmJkeYAfpVLmj-qHhol", + "type": "line", + "x": 947.625, + "y": 1168.5, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1401537628, + "version": 380, + "versionNonce": 1216102884, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "4ccPjDIHyolwYrRjePIZQ", + "type": "line", + "x": 1069.625, + "y": 1169.5, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1445478244, + "version": 432, + "versionNonce": 1036795484, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "7n9Va4aCv2frULofssYe1", + "type": "ellipse", + "x": 949.875, + "y": 1245.75, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 789108956, + "version": 447, + "versionNonce": 48391524, + "isDeleted": false + }, + { + "id": "bwfIiq16JgZiB3KxNuUSE", + "type": "text", + "x": 979.625, + "y": 1291, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1160950500, + "version": 314, + "versionNonce": 1592529628, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "5cQrHFOQIIiWj1g6iz-0u", + "type": "text", + "x": 919.625, + "y": 1321.75, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1655790940, + "version": 331, + "versionNonce": 1673009380, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "r715B9Xh8NK3tgDrHa_ta", + "type": "diamond", + "x": 949.125, + "y": 1405.75, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1638053476, + "version": 297, + "versionNonce": 1713198940, + "isDeleted": false + }, + { + "id": "IUD9gKNFwwDGssTCQMivE", + "type": "text", + "x": 940.875, + "y": 1453, + "width": 146, + "height": 25, + "angle": 0, + 
"strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 131420636, + "version": 301, + "versionNonce": 378231908, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "olLquFzP1bWExaxUnEtOC", + "type": "rectangle", + "x": 827.625, + "y": 1581.5, + "width": 357.50000000000006, + "height": 122.49999999999991, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 312740956, + "version": 279, + "versionNonce": 623521764, + "isDeleted": false + }, + { + "id": "2T8bRODpwVvJ_LB346Tys", + "type": "rectangle", + "x": 843.875, + "y": 1599, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 154310628, + "version": 158, + "versionNonce": 1035743324, + "isDeleted": false + }, + { + "id": "15CHgGbfgXivIS1LM9pn0", + "type": "rectangle", + "x": 956.25, + "y": 1597.25, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 259570396, + "version": 197, + "versionNonce": 799183716, + "isDeleted": false + }, + { + "id": "jH3mVWOPIu4Z23BhJup1k", + "type": "rectangle", + "x": 1070.75, + "y": 1597.75, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 291171428, + "version": 182, + "versionNonce": 902364, + "isDeleted": false + }, + { + "id": "2NVzp0qsCAL-Wg6lDjMT1", + "type": "text", + "x": 1209.875, + "y": 1626.25, + "width": 318, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1993947100, + "version": 172, + "versionNonce": 608339684, + "isDeleted": false, + "text": "PGBouncer Deployment", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "MFTqi-5KJ9TBfh7uqNVkE", + "type": "arrow", + "x": 1003.3728133276104, + "y": 1572.2587801109535, + "width": 3.0021866723894846, + "height": 89.75878011095347, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 893334116, + "version": 134, + "versionNonce": 473483612, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 3.0021866723894846, + -89.75878011095347 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "H4Mnirn-bNpmp8P3r8e3l", + "type": "arrow", + "x": 1006.375, + "y": 1397.5, + "width": 1.25, + "height": 46.25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1660277988, + 
"version": 121, + "versionNonce": 2006452836, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -1.25, + -46.25 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "Ti-szwL_0r7pK6bVA0cpb", + "type": "rectangle", + "x": 791.3125, + "y": 1836.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1569973084, + "version": 397, + "versionNonce": 1972321756, + "isDeleted": false + }, + { + "id": "hrkuTxEdhUF4slXQn3d8p", + "type": "rectangle", + "x": 778.0625, + "y": 1821.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1620283492, + "version": 378, + "versionNonce": 1550917092, + "isDeleted": false + }, + { + "id": "F5w_L9hECvkc5DtRnBhAw", + "type": "rectangle", + "x": 807.3125, + "y": 1850.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1349982172, + "version": 435, + "versionNonce": 891495004, + "isDeleted": false + }, + { + "id": "0JIhGYPnVdHMjSdyBHj6E", + "type": "text", + "x": 1345.9375, + "y": 1919.25, + "width": 500, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1745609700, + "version": 321, + "versionNonce": 2077999460, + "isDeleted": false, + "text": "Your Application at large pod counts", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "ZHbFN5wfBx1NalW6UbZeM", + "type": "rectangle", + "x": 976.5, + "y": 1841.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 349222500, + "version": 431, + "versionNonce": 298219228, + "isDeleted": false + }, + { + "id": "3PnSvfGnPGd5XNGLNcRBH", + "type": "rectangle", + "x": 963.25, + "y": 1826.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1720144348, + "version": 412, + "versionNonce": 757462244, + "isDeleted": false + }, + { + "id": "x83a6lJgB-m58TYSJr7m8", + "type": "rectangle", + "x": 992.5, + "y": 1855.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 632572388, + "version": 469, + "versionNonce": 403389276, + "isDeleted": false + }, + { + "id": "yM7IY0uff7LBVk-kR8nsa", + "type": "rectangle", + "x": 1164, + "y": 1840, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 
649289700, + "version": 428, + "versionNonce": 769505380, + "isDeleted": false + }, + { + "id": "fNOgZlF9boua0Bgb1tpPt", + "type": "rectangle", + "x": 1150.75, + "y": 1825, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1486749788, + "version": 409, + "versionNonce": 1917022172, + "isDeleted": false + }, + { + "id": "paHFy_DHE_S0Pf7eoTgTF", + "type": "rectangle", + "x": 1180, + "y": 1854, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 142984036, + "version": 466, + "versionNonce": 1506872292, + "isDeleted": false + }, + { + "id": "of-4scEuTkHGGLqyZpKil", + "type": "rectangle", + "x": 915.25, + "y": 1956.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1682303844, + "version": 428, + "versionNonce": 813765724, + "isDeleted": false + }, + { + "id": "1k7q_Kt5-BRgsBAWNpyUz", + "type": "rectangle", + "x": 902, + "y": 1941.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1542188252, + "version": 409, + "versionNonce": 1661857636, + "isDeleted": false + }, + { + "id": "0D93-v_7WWIol3r7Oedv9", + "type": "rectangle", + "x": 931.25, + "y": 1970.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 448454372, + "version": 466, + "versionNonce": 278276316, + "isDeleted": false + }, + { + "id": "i9XufVTPGOfxMO3Yr8pKR", + "type": "rectangle", + "x": 1107.75, + "y": 1957.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1850083044, + "version": 425, + "versionNonce": 165831396, + "isDeleted": false + }, + { + "id": "lsBHDW4ED1LbgRpMwVXqp", + "type": "rectangle", + "x": 1094.5, + "y": 1942.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 103631196, + "version": 406, + "versionNonce": 1175760220, + "isDeleted": false + }, + { + "id": "9Gv1xBz0PDtcIYrABxd-9", + "type": "rectangle", + "x": 1123.75, + "y": 1971.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 892045924, + "version": 463, + "versionNonce": 1423730276, + "isDeleted": false + }, + { + "id": "DpB7bLHnenCrJH0UrF2wE", + "type": "arrow", + "x": 818.875, + "y": 1809.25, + "width": 151.25, + "height": 92.5, + "angle": 
0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 600387420, + "version": 99, + "versionNonce": 1112449500, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 151.25, + -92.5 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "hhcSLd8LaNTFKrVkn7cpx", + "type": "arrow", + "x": 1011.375, + "y": 1814.25, + "width": 6.25, + "height": 98.75, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 475116516, + "version": 107, + "versionNonce": 1221196260, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -6.25, + -98.75 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "O-cD7TO6FEdMCeX8PHLiN", + "type": "arrow", + "x": 1196.375, + "y": 1811.75, + "width": 145, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1661665244, + "version": 99, + "versionNonce": 2086012508, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -145, + -90 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "3v6ZCgcVGuyCcReUPeom4", + "type": "arrow", + "x": 1003.875, + "y": 1141.875, + "width": 40, + "height": 85, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 658333276, + "version": 94, + "versionNonce": 549002596, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -40, + -85 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "Lqxtc4vXdq0aP4yP3vrbo", + "type": "text", + "x": 1307.375, + "y": 1155.461956521739, + "width": 49.250000000000036, + "height": 74.94565217391309, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 246194532, + "version": 192, + "versionNonce": 956095196, + "isDeleted": false, + "text": "...", + "fontSize": 59.68022440392712, + "fontFamily": 1, + "textAlign": "left", + "baseline": 53 + }, + { + "id": "MpgPzae1IIzHDBhZ2dAsQ", + "type": "ellipse", + "x": 1153.125, + "y": 1147.625, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 321763812, + "version": 437, + "versionNonce": 100126948, + "isDeleted": false + }, + { + "id": "jz6wcBLdv5F7JTNEPJoUG", + "type": "line", + "x": 1153.125, + "y": 1161.625, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1259226716, + "version": 416, + "versionNonce": 1389698908, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "QxnIa0hyHn9qELemjPAEE", + "type": "line", + "x": 1275.125, + "y": 1162.625, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + 
"backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1755635044, + "version": 468, + "versionNonce": 1209275492, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "87YsZ0WhoaM_0KQpdMPcy", + "type": "ellipse", + "x": 1154.125, + "y": 1232.625, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 112007900, + "version": 443, + "versionNonce": 36754396, + "isDeleted": false + }, + { + "id": "5uRjADtUCimVL1nnCSAMG", + "type": "text", + "x": 1137.625, + "y": 1313.625, + "width": 165, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1516998884, + "version": 374, + "versionNonce": 349956068, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "UdAXrAQCc8cnIf7FmuG9G", + "type": "text", + "x": 1193.125, + "y": 1291.625, + "width": 44, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 8216412, + "version": 305, + "versionNonce": 622983260, + "isDeleted": false, + "text": "pod-1", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "iwGdpD-yA5HRQQQa5oObs", + "type": "ellipse", + "x": 414.5, + "y": 1190.625, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 863881572, + "version": 523, + "versionNonce": 811420516, + "isDeleted": false + }, + { + "id": "coxnHZBDXMLYv6wioEitM", + "type": "line", + "x": 414.5, + "y": 1204.625, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 589885148, + "version": 502, + "versionNonce": 125471964, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "cAt5dmyINZTv98xAyZP8b", + "type": "line", + "x": 536.5, + "y": 1205.625, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 860290276, + "version": 554, + "versionNonce": 753286884, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "3xYBhCRPLLXNqfYMftBXC", + "type": "ellipse", + "x": 416.75, + "y": 1281.875, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1180611420, + "version": 569, + 
"versionNonce": 718049628, + "isDeleted": false + }, + { + "id": "N0zhhdNd_2qdGLiaFXa5k", + "type": "text", + "x": 446.5, + "y": 1327.125, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1520287844, + "version": 436, + "versionNonce": 587514468, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "1P4LkT1SjnEcULq5T-RN9", + "type": "text", + "x": 386.5, + "y": 1357.875, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2003298268, + "version": 456, + "versionNonce": 241829340, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "mdHM232x-I25hYtSee4Ge", + "type": "diamond", + "x": 416, + "y": 1441.875, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 22025188, + "version": 419, + "versionNonce": 1770444260, + "isDeleted": false + }, + { + "id": "ZJlUU3rT957t1RdM5RInK", + "type": "text", + "x": 407.75, + "y": 1489.125, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 173444188, + "version": 423, + "versionNonce": 2003102300, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "B1Qvi5HNMM_roG6ON0BQD", + "type": "arrow", + "x": 473.25, + "y": 1433.625, + "width": 1.25, + "height": 46.25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1433139044, + "version": 243, + "versionNonce": 1299872100, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -1.25, + -46.25 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "XDPXWDobzcFq4D7R2gqh4", + "type": "arrow", + "x": 692.625, + "y": 1046.125, + "width": 178.44955960164782, + "height": 137.8884688600433, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1705785180, + "version": 76, + "versionNonce": 948911836, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -178.44955960164782, + 137.8884688600433 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "83GpNN-rHKKsQhs40OAmS", + "type": "text", + "x": 547.875, + "y": 1088.625, + "width": 97, + "height": 25, + "angle": 5.608444364956034, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 728359772, + "version": 135, + "versionNonce": 1244507364, + "isDeleted": false, + "text": "continuous", + "fontSize": 
20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "0iYzRksu9bFd4FRhNS4vL", + "type": "rectangle", + "x": 356.375, + "y": 1152.3750000000002, + "width": 228.75000000000003, + "height": 381.2499999999999, + "angle": 0, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1912617828, + "version": 132, + "versionNonce": 2054882140, + "isDeleted": false + }, + { + "id": "_w-R0RLuSZGXtjYndbBuF", + "type": "text", + "x": 205.125, + "y": 1332.375, + "width": 225, + "height": 35, + "angle": 4.723624462642652, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 339311844, + "version": 143, + "versionNonce": 1093562468, + "isDeleted": false, + "text": "Standby Cluster", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "_MObJgJ_wonJNtPP1p6ZX", + "type": "rectangle", + "x": 430.25, + "y": 1656.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1745745252, + "version": 505, + "versionNonce": 2141117532, + "isDeleted": false + }, + { + "id": "ers2ZQYACj6DmohzS1V61", + "type": "rectangle", + "x": 417, + "y": 1641.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1610711772, + "version": 486, + "versionNonce": 1938859876, + "isDeleted": false + }, + { + "id": "a7YApsVVLIgOEWz-wRw-R", + "type": "rectangle", + "x": 446.25, + "y": 1670.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1022470372, + "version": 543, + "versionNonce": 1220210908, + "isDeleted": false + }, + { + "id": "-HPd9HbQfjaSSnyVtxzAI", + "type": "arrow", + "x": 468.1411898687482, + "y": 1625.0761543437839, + "width": 5.733810131251801, + "height": 83.95115434378386, + "angle": 0, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1743894116, + "version": 51, + "versionNonce": 1766726372, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 5.733810131251801, + -83.95115434378386 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "74_FKllXVIzRwRKskBVca", + "type": "text", + "x": 238.625, + "y": 1657.375, + "width": 143, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 990652900, + "version": 76, + "versionNonce": 778073436, + "isDeleted": false, + "text": "Read only app\n(e.g. 
migration)", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 43 + }, + { + "id": "nhSbj-rOoBSaObbxkDC06", + "type": "text", + "x": -17.8215120805408, + "y": 418.7138510177598, + "width": 297.4346399775327, + "height": 35.408885711611035, + "angle": 4.728012709005166, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 107991652, + "version": 133, + "versionNonce": 1041309284, + "isDeleted": false, + "text": "Postgres Deployment", + "fontSize": 28.32710856928882, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "P6l-7fAr8MMc9KsB2kZw_", + "type": "text", + "x": 409.625, + "y": 30.375, + "width": 1036.2000000000005, + "height": 55, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2073692900, + "version": 209, + "versionNonce": 1038182876, + "isDeleted": false, + "text": "Zalando Postgres Operator : Supported Setups", + "fontSize": 43.99999999999998, + "fontFamily": 1, + "textAlign": "left", + "baseline": 39 + }, + { + "id": "bZZTypDxRVen7t6-5jXqf", + "type": "arrow", + "x": 404.8103958096076, + "y": 598.9661255675087, + "width": 125.49452508728018, + "height": 1.4560642183292885, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1915235036, + "version": 95, + "versionNonce": 2115434724, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 125.49452508728018, + -1.4560642183292885 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "g1TuJyldd9lGKOVeuxdCc", + "type": "text", + "x": 424.125, + "y": 563.625, + "width": 67, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1359012196, + "version": 11, + "versionNonce": 173002332, + "isDeleted": false, + "text": "stream", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + } + ], + "appState": { + "viewBackgroundColor": "#ffffff" + } +} \ No newline at end of file diff --git a/docs/diagrams/neutral_operator.png b/docs/diagrams/neutral_operator.png new file mode 100644 index 000000000..b8f807d63 Binary files /dev/null and b/docs/diagrams/neutral_operator.png differ diff --git a/docs/gsoc-2019/ideas.md b/docs/gsoc-2019/ideas.md deleted file mode 100644 index 456a5a0ff..000000000 --- a/docs/gsoc-2019/ideas.md +++ /dev/null @@ -1,63 +0,0 @@ -

-# Google Summer of Code 2019

- -## Applications steps - -1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/) -2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application. -3. Select a project from the list of ideas below or propose your own. -4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details. -5. Submit proposal and the proof of enrollment before April 9 2019 18:00 UTC through the web site of the Program. - -## Project ideas - - -### Place database pods into the "Guaranteed" Quality-of-Service class - -* **Description**: Kubernetes runtime does not kill pods in this class on condition they stay within their resource limits, which is desirable for the DB pods serving production workloads. To be assigned to that class, pod's resources must equal its limits. The task is to add the `enableGuaranteedQoSClass` or the like option to the Postgres manifest and the operator configmap that forcibly re-write pod resources to match the limits. -* **Recommended skills**: golang, basic Kubernetes abstractions -* **Difficulty**: moderate -* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) - -### Implement the kubectl plugin for the Postgres CustomResourceDefinition - -* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command, -that can enable, for example, correct deletion or major version upgrade of Postgres clusters. -* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes -* **Difficulty**: moderate to medium, depending on the plugin design -* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) - -### Implement the openAPIV3Schema for the Postgres CRD - -* **Description**: at present the operator validates a database manifest on its own. -It will be helpful to reject erroneous manifests before they reach the operator using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation). It is up to the student to decide whether to write the schema manually or to adopt existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation). -* **Recommended skills**: golang, JSON schema -* **Difficulty**: medium -* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#388](https://github.com/zalando/postgres-operator/issues/388) - -### Design a solution for the local testing of the operator - -* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems: -First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing. 
-Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI. -A promising option is the Kubernetes own [kind](https://github.com/kubernetes-sigs/kind) -* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions -* **Difficulty**: medium to hard depending on the selected desing -* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#475](https://github.com/zalando/postgres-operator/issues/475) - -### Detach a Postgres cluster from the operator for maintenance - -* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster. - Currently the only way to achieve this behavior is to shutdown the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator control and thus is highly undesirable in production Kubernetes clusters. It would be much better to be able to detach only the desired Postgres cluster from the operator for the time being and re-attach it again after maintenance. -* **Recommended skills**: golang, architecture of a Kubernetes operator -* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases. -* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#421](https://github.com/zalando/postgres-operator/issues/421) - -### Propose your own idea - -Feel free to come up with your own ideas. For inspiration, -see [our bug tracker](https://github.com/zalando/postgres-operator/issues), -the [official `CustomResouceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) -and [other operators](https://github.com/operator-framework/awesome-operators). diff --git a/docs/quickstart.md b/docs/quickstart.md index d2c88b9a4..86d2fabbf 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -34,8 +34,8 @@ Postgres cluster. This can work in two ways: via a ConfigMap or a custom The Postgres Operator can be deployed in the following ways: * Manual deployment +* Kustomization * Helm chart -* Operator Lifecycle Manager (OLM) ### Manual deployment setup @@ -91,20 +91,6 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will be skipped with warning when using v3. Documentation for installing applications with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/). -### Operator Lifecycle Manager (OLM) - -The [Operator Lifecycle Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager) -has been designed to facilitate management of K8s operators. It has to be -installed in your K8s environment. When OLM is set up simply download and deploy -the Postgres Operator with the following command: - -```bash -kubectl create -f https://operatorhub.io/install/postgres-operator.yaml -``` - -This installs the operator in the `operators` namespace. More information can be -found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator). 
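+### Kustomization
+
+For the Kustomization option listed above, the operator manifests can be
+consumed as a remote base. A minimal sketch of such a `kustomization.yaml`
+could look as follows; the remote path mirrors the UI example further below
+and is an assumption, not an official entry point:
+
+```yaml
+# kustomization.yaml -- minimal sketch; the remote base path is an assumption
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  # pull the operator manifests straight from the GitHub repository
+  - github.com/zalando/postgres-operator/manifests
+```
+
+Applying it with `kubectl apply -k .` requires a kubectl version whose
+built-in kustomize supports remote bases in `resources`.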
- 
## Check if Postgres Operator is running

Starting the operator may take a few seconds. Check if the operator pod is
@@ -142,6 +128,9 @@ To deploy the UI, simply apply all its manifest files or use the UI helm chart:

# manual deployment
kubectl apply -f ui/manifests/

+# or kustomization
+kubectl apply -k github.com/zalando/postgres-operator/ui/manifests
+
# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```
@@ -160,7 +149,7 @@ You can now access the web interface by port forwarding the UI pod (mind the
label selector) and enter `localhost:8081` in your browser:

```bash
-kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
+kubectl port-forward svc/postgres-operator-ui 8081:80
```

Available options are explained in detail in the [UI docs](operator-ui.md).
diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index 576031543..589921bc5 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -65,6 +65,20 @@ These parameters are grouped directly under the `spec` key in the manifest.
 custom Docker image that overrides the **docker_image** operator parameter.
 It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.

+* **schedulerName**
+ specifies the scheduling profile for database pods. If no value is provided,
+ K8s' `default-scheduler` will be used. Optional.
+
+* **spiloRunAsUser**
+ sets the user ID which should be used in the container to run the process.
+ This must be set to run the container without root. By default the container
+ runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spiloRunAsGroup**
+ sets the group ID which should be used in the container to run the process.
+ This must be set to run the container without root. By default the container
+ runs with root. This option only works for Spilo versions >= 1.6-p3.
+
* **spiloFSGroup**
 the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
 writable by the group ID specified. This will override the **spilo_fsgroup**
@@ -141,10 +155,15 @@ These parameters are grouped directly under the `spec` key in the manifest.
 configured (so you can override the operator configuration). Optional.

* **enableConnectionPooler**
- Tells the operator to create a connection pooler with a database. If this
- field is true, a connection pooler deployment will be created even if
+ Tells the operator to create a connection pooler with a database for the master
+ service. If this field is true, a connection pooler deployment will be created even if
 `connectionPooler` section is empty. Optional, not set by default.

+* **enableReplicaConnectionPooler**
+ Tells the operator to create a connection pooler with a database for the replica
+ service. If this field is true, a connection pooler deployment for the replica
+ will be created even if the `connectionPooler` section is empty. Optional, not set by default.
+
* **enableLogicalBackup**
 Determines if the logical backup of this cluster should be taken and uploaded
 to S3. Default: false. Optional.
@@ -231,10 +250,10 @@ explanation of `ttl` and `loop_wait` parameters.
* **synchronous_mode**
 Patroni `synchronous_mode` parameter value. The default is set to `false`. Optional.
- 
+
* **synchronous_mode_strict**
 Patroni `synchronous_mode_strict` parameter value. Can be used in addition
 to `synchronous_mode`. The default is set to `false`. Optional.
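+
+As a quick orientation, a minimal cluster manifest combining several of the
+new options documented above might look as follows. This is a hedged sketch,
+not a manifest from the repository; all values are illustrative (101/103 are
+assumed to match the non-root user and group of recent Spilo images):
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-minimal-cluster
+spec:
+  teamId: "acid"
+  numberOfInstances: 2
+  postgresql:
+    version: "13"
+  volume:
+    size: 1Gi
+  schedulerName: default-scheduler     # optional; falls back to this anyway
+  spiloRunAsUser: 101                  # run the container without root
+  spiloRunAsGroup: 103                 # requires Spilo >= 1.6-p3
+  enableConnectionPooler: true         # pooler for the master service
+  enableReplicaConnectionPooler: true  # pooler for the replica service
+  patroni:
+    synchronous_mode: true
+```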
- 
+
## Postgres container resources

Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
@@ -387,8 +406,10 @@ CPU and memory limits for the sidecar container.

Parameters are grouped under the `connectionPooler` top-level key and specify
configuration for connection pooler. If this section is not empty, a connection
-pooler will be created for a database even if `enableConnectionPooler` is not
-present.
+pooler will be created for the master service only, even if `enableConnectionPooler`
+is not present. If this section is present, it defines the configuration
+for both the master and replica pooler services (if `enableReplicaConnectionPooler`
+is enabled).

* **numberOfInstances**
 How many instances of connection pooler to create.
diff --git a/docs/reference/command_line_and_environment.md b/docs/reference/command_line_and_environment.md
index ece29b094..35f47cabf 100644
--- a/docs/reference/command_line_and_environment.md
+++ b/docs/reference/command_line_and_environment.md
@@ -56,3 +56,7 @@ The following environment variables are accepted by the operator:

* **CRD_READY_WAIT_INTERVAL**
 defines the interval between consecutive attempts waiting for the
 `postgresql` CRD to be created. The default is 5s.
+
+* **ENABLE_JSON_LOGGING**
+ Set to `true` for JSON-formatted logging output.
+ The default is `false`.
diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md
index a81cabfc4..01e5c4039 100644
--- a/docs/reference/operator_parameters.md
+++ b/docs/reference/operator_parameters.md
@@ -76,9 +76,15 @@ Those are top-level keys, containing both leaf keys and groups.
 The default is `true`.

* **enable_lazy_spilo_upgrade**
- Instruct operator to update only the statefulsets with the new image without immediately doing the rolling update. The assumption is pods will be re-started later with the new image, for example due to the node rotation.
+ Instructs the operator to update only the statefulsets with new images (Spilo and InitContainers) without immediately doing the rolling update. The assumption is that pods will be restarted later with new images, for example due to node rotation.
 The default is `false`.

+* **enable_pgversion_env_var**
+ With newer versions of Spilo, it is preferable to use the `PGVERSION` pod environment variable instead of setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
+
+* **enable_spilo_wal_path_compat**
+ enables a backwards-compatible path between Spilo 12 and Spilo 13 images. The default is `false`.
+
* **etcd_host**
 Etcd connection string for Patroni defined as `host:port`. Not required
 when Patroni native Kubernetes support is used. The default is empty (use
@@ -200,6 +206,16 @@ configuration they are grouped under the `kubernetes` key.
 of a database created by the operator. If the annotation key is also provided
 by the database definition, the database definition value is used.

+* **delete_annotation_date_key**
+ key name for an annotation that compares the manifest value with the current
+ date in the YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
+ The default is empty, which also disables this delete protection check.
+
+* **delete_annotation_name_key**
+ key name for an annotation that compares the manifest value with the Postgres
+ cluster name. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
+ The default is empty, which also disables this delete protection check.
+
* **downscaler_annotations**
 An array of annotations that should be passed from Postgres CRD on to the
 statefulset and, if exists, to the connection pooler deployment as well.
@@ -252,8 +268,20 @@ configuration they are grouped under the `kubernetes` key.
 teams API. The default is `postgresql-operator`.

* **infrastructure_roles_secret_name**
- namespaced name of the secret containing infrastructure roles names and
- passwords.
+ *deprecated*: namespaced name of the secret containing infrastructure roles
+ with user names, passwords and role membership.
+
+* **infrastructure_roles_secrets**
+ array of infrastructure role definitions which reference existing secrets
+ and specify the key names from which user name, password and role membership
+ are extracted. For the ConfigMap this has to be a string, which allows
+ referencing only one infrastructure roles secret. The default is empty.
+
+* **inherited_annotations**
+ list of annotation keys that can be inherited from the cluster manifest, and
+ added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PDB` and
+ `Services`) created by the operator, including the ones from the connection
+ pooler deployment. The default is empty.

* **pod_role_label**
 name of the label assigned to the Postgres pods (and services/endpoints) by
@@ -264,15 +292,16 @@ configuration they are grouped under the `kubernetes` key.
 objects. The default is `application:spilo`.

* **inherited_labels**
- list of labels that can be inherited from the cluster manifest, and added to
- each child objects (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created
- by the operator. Typical use case is to dynamically pass labels that are
- specific to a given Postgres cluster, in order to implement `NetworkPolicy`.
- The default is empty.
+ list of label keys that can be inherited from the cluster manifest, and
+ added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PVCs`,
+ `PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator.
+ A typical use case is to dynamically pass labels that are specific to a
+ given Postgres cluster, in order to implement `NetworkPolicy`. The default
+ is empty.

* **cluster_name_label**
- name of the label assigned to Kubernetes objects created by the operator that
- indicates which cluster a given object belongs to. The default is
+ name of the label assigned to Kubernetes objects created by the operator
+ that indicates which cluster a given object belongs to. The default is
 `cluster-name`.

* **node_readiness_label**
@@ -301,6 +330,16 @@ configuration they are grouped under the `kubernetes` key.
 that should be assigned to the Postgres pods. The priority class itself
 must be defined in advance. Default is empty (use the default priority
 class).

+* **spilo_runasuser**
+ sets the user ID which should be used in the container to run the process.
+ This must be set to run the container without root. By default the container
+ runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spilo_runasgroup**
+ sets the group ID which should be used in the container to run the process.
+ This must be set to run the container without root. By default the container
+ runs with root. This option only works for Spilo versions >= 1.6-p3.
+ * **spilo_fsgroup** the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and writable by the group ID specified. This is required to run Spilo as a @@ -333,6 +372,12 @@ configuration they are grouped under the `kubernetes` key. of stateful sets of PG clusters. The default is `ordered_ready`, the second possible value is `parallel`. +* **storage_resize_mode** + defines how operator handels the difference between requested volume size and + actual size. Available options are: ebs - tries to resize EBS volume, pvc - + changes PVC definition, off - disables resize of the volumes. Default is "pvc". + When using OpenShift please use one of the other available options. + ## Kubernetes resource requests This group allows you to configure resource requests for the Postgres pods. @@ -402,6 +447,12 @@ CRD-based configuration. Those options affect the behavior of load balancers created by the operator. In the CRD-based configuration they are grouped under the `load_balancer` key. +* **custom_service_annotations** + This key/value map provides a list of annotations that get attached to each + service of a cluster created by the operator. If the annotation key is also + provided by the cluster definition, the manifest value is used. + Optional. + * **db_hosted_zone** DNS zone for the cluster DNS name when the load balancer is configured for the cluster. Only used when combined with @@ -418,11 +469,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. cluster. Can be overridden by individual cluster settings. The default is `false`. -* **custom_service_annotations** - This key/value map provides a list of annotations that get attached to each - service of a cluster created by the operator. If the annotation key is also - provided by the cluster definition, the manifest value is used. - Optional. +* **external_traffic_policy** defines external traffic policy for load + balancers. Allowed values are `Cluster` (default) and `Local`. * **master_dns_name_format** defines the DNS name string template for the master load balancer cluster. The default is @@ -451,6 +499,20 @@ yet officially supported. present and accessible by Postgres pods. At the moment, supported services by Spilo are S3 and GCS. The default is empty. +* **wal_gs_bucket** + GCS bucket to use for shipping WAL segments with WAL-E. A bucket has to be + present and accessible by Postgres pods. Note, only the name of the bucket is + required. At the moment, supported services by Spilo are S3 and GCS. + The default is empty. + +* **gcp_credentials** + Used to set the GOOGLE_APPLICATION_CREDENTIALS environment variable for the pods. + This is used in with conjunction with the `additional_secret_mount` and + `additional_secret_mount_path` to properly set the credentials for the spilo + containers. This will allow users to use specific + [service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform). + The default is empty + * **log_s3_bucket** S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS. The bucket has to be present and accessible by Postgres pods. The default is @@ -466,10 +528,22 @@ yet officially supported. AWS region used to store EBS volumes. The default is `eu-central-1`. * **additional_secret_mount** - Additional Secret (aws or gcp credentials) to mount in the pod. The default is empty. + Additional Secret (aws or gcp credentials) to mount in the pod. + The default is empty. 
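+  A sketch of how this plays together with `gcp_credentials` and
+  `additional_secret_mount_path` (described next); the secret name and the
+  paths are illustrative, and the `aws_or_gcp` grouping assumes the
+  CRD-based configuration:
+
+  ```yaml
+  configuration:
+    aws_or_gcp:
+      additional_secret_mount: "my-gcp-service-account"
+      additional_secret_mount_path: "/var/secrets/google"
+      gcp_credentials: "/var/secrets/google/key.json"
+  ```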
* **additional_secret_mount_path** - Path to mount the above Secret in the filesystem of the container(s). The default is empty. + Path to mount the above Secret in the filesystem of the container(s). + The default is empty. + +* **enable_ebs_gp3_migration** + enable automatic migration on AWS from gp2 to gp3 volumes, that are smaller + than the configured max size (see below). This ignores that EBS gp3 is by + default only 125 MB/sec vs 250 MB/sec for gp2 >= 333GB. + The default is `false`. + +* **enable_ebs_gp3_migration_max_size** + defines the maximum volume size in GB until which auto migration happens. + Default is 1000 (1TB) which matches 3000 IOPS. ## Logical backup @@ -489,6 +563,10 @@ grouped under the `logical_backup` key. The default image is the same image built with the Zalando-internal CI pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup" +* **logical_backup_provider** + Specifies the storage provider to which the backup should be uploaded (`s3` or `gcs`). + Default: "s3" + * **logical_backup_s3_bucket** S3 bucket to store backup results. The bucket has to be present and accessible by Postgres pods. Default: empty. @@ -509,6 +587,9 @@ grouped under the `logical_backup` key. * **logical_backup_s3_secret_access_key** When set, value will be in AWS_SECRET_ACCESS_KEY env variable. The Default is empty. +* **logical_backup_google_application_credentials** + Specifies the path of the google cloud service account json file. Default is empty. + ## Debugging the operator Options to aid debugging of the operator itself. Grouped under the `debug` key. @@ -549,8 +630,8 @@ key. The default is `"log_statement:all"` * **enable_team_superuser** - whether to grant superuser to team members created from the Teams API. - The default is `false`. + whether to grant superuser to members of the cluster's owning team created + from the Teams API. The default is `false`. * **team_admin_role** role name to grant to team members created from the Teams API. The default is @@ -583,6 +664,16 @@ key. cluster to administer Postgres and maintain infrastructure built around it. The default is empty. +* **enable_postgres_team_crd** + toggle to make the operator watch for created or updated `PostgresTeam` CRDs + and create roles for specified additional teams and members. + The default is `false`. + +* **enable_postgres_team_crd_superusers** + in a `PostgresTeam` CRD additional superuser teams can assigned to teams that + own clusters. With this flag set to `false`, it will be ignored. + The default is `false`. + ## Logging and REST API Parameters affecting logging and REST API listener. In the CRD-based diff --git a/docs/user.md b/docs/user.md index 3683fdf61..3463983f7 100644 --- a/docs/user.md +++ b/docs/user.md @@ -49,7 +49,7 @@ Note, that the name of the cluster must start with the `teamId` and `-`. At Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster names and colliding entities. The team ID would also be used to query an API to get all members of a team and create [database roles](#teams-api-roles) for -them. +them. Besides, the maximum cluster name length is 53 characters. ## Watch pods being created @@ -150,23 +150,62 @@ user. There are two ways to define them: #### Infrastructure roles secret -The infrastructure roles secret is specified by the `infrastructure_roles_secret_name` -parameter. 
The role definition looks like this (values are base64 encoded): +Infrastructure roles can be specified by the `infrastructure_roles_secrets` +parameter where you can reference multiple existing secrets. Prior to `v1.6.0` +the operator could only reference one secret with the +`infrastructure_roles_secret_name` option. However, this secret could contain +multiple roles using the same set of keys plus incrementing index. ```yaml -user1: ZGJ1c2Vy -password1: c2VjcmV0 -inrole1: b3BlcmF0b3I= +apiVersion: v1 +kind: Secret +metadata: + name: postgresql-infrastructure-roles +data: + user1: ZGJ1c2Vy + password1: c2VjcmV0 + inrole1: b3BlcmF0b3I= + user2: ... ``` The block above describes the infrastructure role 'dbuser' with password -'secret' that is a member of the 'operator' role. For the following definitions -one must increase the index, i.e. the next role will be defined as 'user2' and -so on. The resulting role will automatically be a login role. +'secret' that is a member of the 'operator' role. The resulting role will +automatically be a login role. -Note that with definitions that solely use the infrastructure roles secret -there is no way to specify role options (like superuser or nologin) or role -memberships. This is where the ConfigMap comes into play. +With the new option users can configure the names of secret keys that contain +the user name, password etc. The secret itself is referenced by the +`secretname` key. If the secret uses a template for multiple roles as described +above list them separately. + +```yaml +apiVersion: v1 +kind: OperatorConfiguration +metadata: + name: postgresql-operator-configuration +configuration: + kubernetes: + infrastructure_roles_secrets: + - secretname: "postgresql-infrastructure-roles" + userkey: "user1" + passwordkey: "password1" + rolekey: "inrole1" + - secretname: "postgresql-infrastructure-roles" + userkey: "user2" + ... +``` + +Note, only the CRD-based configuration allows for referencing multiple secrets. +As of now, the ConfigMap is restricted to either one or the existing template +option with `infrastructure_roles_secret_name`. Please, refer to the example +manifests to understand how `infrastructure_roles_secrets` has to be configured +for the [configmap](../manifests/configmap.yaml) or [CRD configuration](../manifests/postgresql-operator-default-configuration.yaml). + +If both `infrastructure_roles_secret_name` and `infrastructure_roles_secrets` +are defined the operator will create roles for both of them. So make sure, +they do not collide. Note also, that with definitions that solely use the +infrastructure roles secret there is no way to specify role options (like +superuser or nologin) or role memberships. This is where the additional +ConfigMap comes into play. #### Secret plus ConfigMap @@ -230,6 +269,144 @@ to choose superusers, group roles, [PAM configuration](https://github.com/CyberD etc. An OAuth2 token can be passed to the Teams API via a secret. The name for this secret is configurable with the `oauth_token_secret_name` parameter. +### Additional teams and members per cluster + +Postgres clusters are associated with one team by providing the `teamID` in +the manifest. Additional superuser teams can be configured as mentioned in +the previous paragraph. However, this is a global setting. To assign +additional teams, superuser teams and single users to clusters of a given +team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml). + +Note, by default the `PostgresTeam` support is disabled in the configuration. 
+Switch `enable_postgres_team_crd` flag to `true` and the operator will start to +watch for this CRD. Make sure, the cluster role is up to date and contains a +section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30). + +#### Additional teams + +To assign additional teams and single users to clusters of a given team, +define a mapping with the `PostgresTeam` Kubernetes resource. The Postgres +Operator will read such team mappings each time it syncs all Postgres clusters. + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalTeams: + a-team: + - "b-team" +``` + +With the example above the operator will create login roles for all members +of `b-team` in every cluster owned by `a-team`. It's possible to do vice versa +for clusters of `b-team` in one manifest: + +```yaml +spec: + additionalTeams: + a-team: + - "b-team" + b-team: + - "a-team" +``` + +You see, the `PostgresTeam` CRD is a global team mapping and independent from +the Postgres manifests. It is possible to define multiple mappings, even with +redundant content - the Postgres operator will create one internal cache from +it. Additional teams are resolved transitively, meaning you will also add +users for their `additionalTeams`, e.g.: + +```yaml +spec: + additionalTeams: + a-team: + - "b-team" + - "c-team" + b-team: + - "a-team" +``` + +This creates roles for members of the `c-team` team not only in all clusters +owned by `a-team`, but as well in cluster owned by `b-team`, as `a-team` is +an `additionalTeam` to `b-team` + +Not, you can also define `additionalSuperuserTeams` in the `PostgresTeam` +manifest. By default, this option is disabled and must be configured with +`enable_postgres_team_crd_superusers` to make it work. + +#### Virtual teams + +There can be "virtual teams" that do not exist in the Teams API. It can make +it easier to map a group of teams to many other teams: + +```yaml +spec: + additionalTeams: + a-team: + - "virtual-team" + b-team: + - "virtual-team" + virtual-team: + - "c-team" + - "d-team" +``` + +This example would create roles for members of `c-team` and `d-team` plus +additional `virtual-team` members in clusters owned by `a-team` or `b-team`. + +#### Teams changing their names + +With `PostgresTeams` it is also easy to cover team name changes. Just add +the mapping between old and new team name and the rest can stay the same. +E.g. if team `a-team`'s name would change to `f-team` in the teams API it +could be reflected in a `PostgresTeam` mapping with just two lines: + +```yaml +spec: + additionalTeams: + a-team: + - "f-team" +``` + +This is helpful, because Postgres cluster names are immutable and can not +be changed. Only via cloning it could get a different name starting with the +new `teamID`. + +#### Additional members + +Single members might be excluded from teams although they continue to work +with the same people. However, the teams API would not reflect this anymore. +To still add a database role for former team members list their role under +the `additionalMembers` section of the `PostgresTeam` resource: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalMembers: + a-team: + - "tia" +``` + +This will create the login role `tia` in every cluster owned by `a-team`. +The user can connect to databases like the other team members. + +The `additionalMembers` map can also be used to define users of virtual +teams, e.g. 
for `virtual-team` we used above: + +```yaml +spec: + additionalMembers: + virtual-team: + - "flynch" + - "rdecker" + - "briggs" +``` + ## Prepared databases with roles and default privileges The `users` section in the manifests only allows for creating database roles @@ -412,7 +589,7 @@ manifest the operator will raise the limits to the configured minimum values. If no resources are defined in the manifest they will be obtained from the configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests). -## Use taints and tolerations for dedicated PostgreSQL nodes +## Use taints, tolerations and node affinity for dedicated PostgreSQL nodes To ensure Postgres pods are running on nodes without any other application pods, you can use [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) @@ -426,6 +603,28 @@ spec: effect: NoSchedule ``` +If you need the pods to be scheduled on specific nodes you may use [node affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/) +to specify a set of label(s), of which a prospective host node must have at least one. This could be used to +place nodes with certain hardware capabilities (e.g. SSD drives) in certain environments or network segments, +e.g. for PCI compliance. + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: acid-minimal-cluster +spec: + teamId: "ACID" + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: environment + operator: In + values: + - pci +``` + ## How to clone an existing PostgreSQL cluster You can spin up a new cluster as a clone of the existing one, using a `clone` @@ -437,6 +636,10 @@ section in the spec. There are two options here: Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade) of PostgreSQL. +## In-place major version upgrade + +Starting with Spilo 13, operator supports in-place major version upgrade to a higher major version (e.g. from PG 10 to PG 12). To trigger the upgrade, simply increase the version in the manifest. It is your responsibility to test your applications against the new version before the upgrade; downgrading is not supported. The easiest way to do so is to try the upgrade on the cloned cluster first. For details of how Spilo does the upgrade [see here](https://github.com/zalando/spilo/pull/488), operator implementation is described [in the admin docs](administrator.md#minor-and-major-version-upgrade). + ### Clone from S3 Cloning from S3 has the advantage that there is no impact on your production @@ -698,11 +901,17 @@ manifest: ```yaml spec: enableConnectionPooler: true + enableReplicaConnectionPooler: true ``` This will tell the operator to create a connection pooler with default configuration, through which one can access the master via a separate service -`{cluster-name}-pooler`. In most of the cases the +`{cluster-name}-pooler`. With the first option, connection pooler for master service +is created and with the second option, connection pooler for replica is created. +Note that both of these flags are independent of each other and user can set or +unset any of them as per their requirements without any effect on the other. + +In most of the cases the [default configuration](reference/operator_parameters.md#connection-pooler-configuration) should be good enough. 
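+For orientation, a minimal sketch of a manifest that enables both poolers.
+The master pooler service is named `{cluster-name}-pooler`; the replica
+service name with the `-repl` suffix mirrors the naming of the replica
+pooler deployment and is an assumption here:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-minimal-cluster
+spec:
+  teamId: "acid"
+  enableConnectionPooler: true          # service: acid-minimal-cluster-pooler
+  enableReplicaConnectionPooler: true   # service: acid-minimal-cluster-pooler-repl
+```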
To configure a new connection pooler individually for each Postgres cluster, specify: diff --git a/e2e/Dockerfile b/e2e/Dockerfile index 236942d04..3eb8c9d70 100644 --- a/e2e/Dockerfile +++ b/e2e/Dockerfile @@ -1,8 +1,12 @@ -FROM ubuntu:18.04 +# An image to run e2e tests. +# The image does not include the tests; all necessary files are bind-mounted when a container starts. +FROM ubuntu:20.04 LABEL maintainer="Team ACID @ Zalando " -COPY manifests ./manifests -COPY requirements.txt tests ./ +ENV TERM xterm-256color + +COPY requirements.txt ./ +COPY scm-source.json ./ RUN apt-get update \ && apt-get install --no-install-recommends -y \ @@ -10,14 +14,15 @@ RUN apt-get update \ python3-setuptools \ python3-pip \ curl \ + vim \ && pip3 install --no-cache-dir -r requirements.txt \ - && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl \ + && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -ARG VERSION=dev -RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" ./__init__.py - -CMD ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"] +# working line +# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests +ENTRYPOINT ["python3", "-m", "unittest"] +CMD ["discover","-v","--failfast","--start-directory","/tests"] \ No newline at end of file diff --git a/e2e/Makefile b/e2e/Makefile index 70a2ff4e9..b904ad763 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -1,6 +1,6 @@ .PHONY: clean copy docker push tools test -BINARY ?= postgres-operator-e2e-tests +BINARY ?= postgres-operator-e2e-tests-runner BUILD_FLAGS ?= -v CGO_ENABLED ?= 0 ifeq ($(RACE),1) @@ -34,15 +34,23 @@ copy: clean mkdir manifests cp ../manifests -r . -docker: copy - docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" . +docker: scm-source.json + docker build -t "$(IMAGE):$(TAG)" . + +scm-source.json: ../.git + echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json push: docker docker push "$(IMAGE):$(TAG)" -tools: docker +tools: # install pinned version of 'kind' - GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1 + # go get must run outside of a dir with a (module-based) Go project ! + # otherwise go get updates project's dependencies and/or behaves differently + cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0 -e2etest: - ./run.sh +e2etest: tools copy clean + ./run.sh main + +cleanup: clean + ./run.sh cleanup \ No newline at end of file diff --git a/e2e/README.md b/e2e/README.md index f1bc5f9ed..3bba6ccc3 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -12,6 +12,10 @@ Docker. Docker Go +# Notice + +The `manifest` folder in e2e tests folder is not commited to git, it comes from `/manifests` + ## Build test runner In the directory of the cloned Postgres Operator repository change to the e2e @@ -29,12 +33,78 @@ runtime. In the e2e folder you can invoke tests either with `make test` or with: ```bash -./run.sh +./run.sh main ``` To run both the build and test step you can invoke `make e2e` from the parent directory. 
+To run the end 2 end test and keep the kind state execute: +```bash +NOCLEANUP=True ./run.sh main +``` + +## Run indidual test + +After having executed a normal E2E run with `NOCLEANUP=True` Kind still continues to run, allowing you subsequent test runs. + +To run an individual test, run the following command in the `e2e` directory + +```bash +NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade +``` + +## Inspecting Kind + +If you want to inspect Kind/Kubernetes cluster, switch `kubeconfig` file and context +```bash +# save the old config in case you have it +export KUBECONFIG_SAVED=$KUBECONFIG + +# use the one created by e2e tests +export KUBECONFIG=/tmp/kind-config-postgres-operator-e2e-tests + +# this kubeconfig defines a single context +kubectl config use-context kind-postgres-operator-e2e-tests +``` + +or use the following script to exec into the K8s setup and then use `kubectl` + +```bash +./exec_into_env.sh + +# use kubectl +kubectl get pods + +# watch relevant objects +./scripts/watch_objects.sh + +# get operator logs +./scripts/get_logs.sh +``` + +If you want to inspect the state of the `kind` cluster manually with a single command, add a `context` flag +```bash +kubectl get pods --context kind-kind +``` +or set the context for a few commands at once + + + +## Cleaning up Kind + +To cleanup kind and start fresh + +```bash +e2e/run.sh cleanup +``` + +That also helps in case you see the +``` +ERROR: no nodes found for cluster "postgres-operator-e2e-tests" +``` +that happens when the `kind` cluster was deleted manually but its configuraiton file was not. + ## Covered use cases The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py): diff --git a/e2e/exec.sh b/e2e/exec.sh new file mode 100755 index 000000000..1ab666e5e --- /dev/null +++ b/e2e/exec.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +kubectl exec -i $1 -- sh -c "$2" diff --git a/e2e/exec_into_env.sh b/e2e/exec_into_env.sh new file mode 100755 index 000000000..ef12ba18a --- /dev/null +++ b/e2e/exec_into_env.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +export cluster_name="postgres-operator-e2e-tests" +export kubeconfig_path="/tmp/kind-config-${cluster_name}" +export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest" +export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \ + --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ + --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tests)",target=/tests \ + --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" diff --git a/e2e/kind-cluster-postgres-operator-e2e-tests.yaml b/e2e/kind-cluster-postgres-operator-e2e-tests.yaml index a59746fd3..752e993cd 100644 --- a/e2e/kind-cluster-postgres-operator-e2e-tests.yaml +++ b/e2e/kind-cluster-postgres-operator-e2e-tests.yaml @@ -1,5 +1,5 @@ kind: Cluster -apiVersion: kind.sigs.k8s.io/v1alpha3 +apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane - role: worker diff --git a/e2e/requirements.txt b/e2e/requirements.txt index 68a8775ff..4f6f5ac5f 100644 --- a/e2e/requirements.txt +++ b/e2e/requirements.txt @@ -1,3 +1,3 @@ -kubernetes==9.0.0 +kubernetes==11.0.0 timeout_decorator==0.4.1 -pyyaml==5.1 
+pyyaml==5.3.1 diff --git a/e2e/run.sh b/e2e/run.sh index c7825bfd3..2d5708778 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -6,71 +6,86 @@ set -o nounset set -o pipefail IFS=$'\n\t' -cd $(dirname "$0"); - readonly cluster_name="postgres-operator-e2e-tests" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" +readonly spilo_image="registry.opensource.zalan.do/acid/spilo-13-e2e:0.3" +readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +export GOPATH=${GOPATH-~/go} +export PATH=${GOPATH}/bin:$PATH + +echo "Clustername: ${cluster_name}" +echo "Kubeconfig path: ${kubeconfig_path}" function pull_images(){ - operator_tag=$(git describe --tags --always --dirty) if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]] then docker pull registry.opensource.zalan.do/acid/postgres-operator:latest fi - if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:${operator_tag}) ]] - then - docker pull registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:latest - fi - operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1) - e2e_test_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests" --format "{{.Repository}}:{{.Tag}}" | head -1) } function start_kind(){ - + echo "Starting kind for e2e tests" # avoid interference with previous test runs if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]] then kind delete cluster --name ${cluster_name} fi - kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml + export KUBECONFIG="${kubeconfig_path}" + kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml + docker pull "${spilo_image}" + kind load docker-image "${spilo_image}" --name ${cluster_name} +} + +function load_operator_image() { + echo "Loading operator image" + export KUBECONFIG="${kubeconfig_path}" kind load docker-image "${operator_image}" --name ${cluster_name} - kind load docker-image "${e2e_test_image}" --name ${cluster_name} - KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})" - export KUBECONFIG } function set_kind_api_server_ip(){ + echo "Setting up kind API server ip" # use the actual kubeconfig to connect to the 'kind' API server # but update the IP address of the API server to the one from the Docker 'bridge' network - cp "${KUBECONFIG}" /tmp readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase - readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane) + readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane) sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" } function run_tests(){ + echo "Running tests... 
image: ${e2e_test_runner_image}" + # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile - docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}" + docker run --rm --network=host -e "TERM=xterm-256color" \ + --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ + --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tests)",target=/tests \ + --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@ } -function clean_up(){ +function cleanup(){ + echo "Executing cleanup" unset KUBECONFIG kind delete cluster --name ${cluster_name} rm -rf ${kubeconfig_path} } function main(){ - - trap "clean_up" QUIT TERM EXIT - + echo "Entering main function..." + [[ -z ${NOCLEANUP-} ]] && trap "cleanup" QUIT TERM EXIT pull_images - start_kind + [[ ! -f ${kubeconfig_path} ]] && start_kind + load_operator_image set_kind_api_server_ip - run_tests + + shift + run_tests $@ exit 0 } -main "$@" +"$1" $@ diff --git a/e2e/scripts/cleanup.sh b/e2e/scripts/cleanup.sh new file mode 100755 index 000000000..2c82388ae --- /dev/null +++ b/e2e/scripts/cleanup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +kubectl delete postgresql acid-minimal-cluster +kubectl delete deployments -l application=db-connection-pooler,cluster-name=acid-minimal-cluster +kubectl delete statefulsets -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete services -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete configmap postgres-operator +kubectl delete deployment postgres-operator \ No newline at end of file diff --git a/e2e/scripts/get_logs.sh b/e2e/scripts/get_logs.sh new file mode 100755 index 000000000..1639f3995 --- /dev/null +++ b/e2e/scripts/get_logs.sh @@ -0,0 +1,2 @@ +#!/bin/bash +kubectl logs $(kubectl get pods -l name=postgres-operator --field-selector status.phase=Running -o jsonpath='{.items..metadata.name}') diff --git a/e2e/scripts/watch_objects.sh b/e2e/scripts/watch_objects.sh new file mode 100755 index 000000000..4c9b82404 --- /dev/null +++ b/e2e/scripts/watch_objects.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +watch -c " +kubectl get postgresql --all-namespaces +echo +echo -n 'Rolling upgrade pending: ' +kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}' +echo +echo +echo 'Pods' +kubectl get pods -l application=spilo -o wide --all-namespaces +echo +kubectl get pods -l application=db-connection-pooler -o wide --all-namespaces +echo +echo 'Statefulsets' +kubectl get statefulsets --all-namespaces +echo +echo 'Deployments' +kubectl get deployments --all-namespaces -l application=db-connection-pooler +kubectl get deployments --all-namespaces -l application=postgres-operator +echo +echo +echo 'Step from operator deployment' +kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}' +echo +echo +echo 'Spilo Image in statefulset' +kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}' +echo +echo +echo 'Queue Status' +kubectl exec -it \$(kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.name}') -- curl localhost:8080/workers/all/status/ +echo" \ No newline at 
end of file diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py new file mode 100644 index 000000000..95e1dc9ad --- /dev/null +++ b/e2e/tests/k8s_api.py @@ -0,0 +1,532 @@ +import json +import time +import subprocess +import warnings + +from kubernetes import client, config +from kubernetes.client.rest import ApiException + + +def to_selector(labels): + return ",".join(["=".join(lbl) for lbl in labels.items()]) + + +class K8sApi: + + def __init__(self): + + # https://github.com/kubernetes-client/python/issues/309 + warnings.simplefilter("ignore", ResourceWarning) + + self.config = config.load_kube_config() + self.k8s_client = client.ApiClient() + + self.core_v1 = client.CoreV1Api() + self.apps_v1 = client.AppsV1Api() + self.batch_v1_beta1 = client.BatchV1beta1Api() + self.custom_objects_api = client.CustomObjectsApi() + self.policy_v1_beta1 = client.PolicyV1beta1Api() + self.storage_v1_api = client.StorageV1Api() + + +class K8s: + ''' + Wraps around K8s api client and helper methods. + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels = labels + self.namespace = namespace + + def get_pg_nodes(self, pg_cluster_name, namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + # give operator time to subscribe to objects + time.sleep(1) + return True + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + pods = list(filter(lambda x: x.status.phase == 'Running', pods)) + + if len(pods): + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def pg_get_status(self, name="acid-minimal-cluster", namespace="default"): + pg = self.api.custom_objects_api.get_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name) + return pg.get("status", {}).get("PostgresClusterStatus", None) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + 
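+        # limit=1 above: at most one service is listed, so svc_type ends up
+        # holding the type of the single match (or '' when none exists)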
for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase == 'Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in 
failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator pod"): + # patching the pod template in the deployment restarts the operator pod + self.api.apps_v1.patch_namespaced_deployment("postgres-operator", "default", {"spec": {"template": {"metadata": {"annotations": {"step": "{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"): + self.api.apps_v1.patch_namespaced_stateful_set(name, namespace, data) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": + return [] + return json.loads(r.stdout.decode()) + + def get_operator_state(self): + pod = self.get_operator_pod() + if pod is None: + return None + pod = pod.metadata.name + + r = self.exec_with_kubectl(pod, "curl localhost:8080/workers/all/status/") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{": + return None + + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod="acid-minimal-cluster-0"): + result = self.get_patroni_state(pod) + return list(filter(lambda x: "State" in x and x["State"] == "running", result)) + + def get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"): + try: + deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace) + return deployment.spec.replicas + except ApiException: + return None + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. 
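+        Returns None when no pod with the given name is found.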
+ ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'): + labels = { + 'application': 'spilo', + 'cluster-name': pg_cluster_name, + 'spilo-role': 'master', + } + + pods = self.api.core_v1.list_namespaced_pod( + namespace, label_selector=to_selector(labels)).items + + if pods: + return pods[0] + + +class K8sBase: + ''' + K8s basic API wrapper class supposed to be inherited by other more specific classes for e2e tests + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels = labels + self.namespace = namespace + + def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + if pods: + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotation)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + 
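+        # limit=1: only the first statefulset matching the labels is checked;
+        # every expected annotation key/value must be present on it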
for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotation)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase == 'Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + 
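+        # exactly one cron job (label application=spilo) should exist
+        # once logical backup is enabled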
self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator deplyment"): + self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": + return [] + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod): + result = self.get_patroni_state(pod) + return list(filter(lambda x: x["State"] == "running", result)) + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. + ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + +""" + Inspiriational classes towards easier writing of end to end tests with one cluster per test case +""" + + +class K8sOperator(K8sBase): + def __init__(self, labels="name=postgres-operator", namespace="default"): + super().__init__(labels, namespace) + + +class K8sPostgres(K8sBase): + def __init__(self, labels="cluster-name=acid-minimal-cluster", namespace="default"): + super().__init__(labels, namespace) + + def get_pg_nodes(self): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 18b9852c4..9e2035652 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1,16 +1,30 @@ +import json import unittest import time import timeout_decorator -import subprocess -import warnings import os import yaml -from kubernetes import client, config +from datetime import datetime +from kubernetes import client + +from tests.k8s_api import K8s +from kubernetes.client.rest import ApiException + +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.3" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.4" def to_selector(labels): - return ",".join(["=".join(l) for l in labels.items()]) + 
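+    # flatten a labels dict into a Kubernetes selector string,
+    # e.g. {"spilo-role": "master"} -> "spilo-role=master"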
return ",".join(["=".join(lbl) for lbl in labels.items()]) + + +def clean_list(values): + # value is not stripped bytes, strip and convert to a string + clean = lambda v: v.strip().decode() + notNone = lambda v: v + + return list(filter(notNone, map(clean, values))) class EndToEndTestCase(unittest.TestCase): @@ -21,6 +35,41 @@ class EndToEndTestCase(unittest.TestCase): # `kind` pods may stuck in the `Terminating` phase for a few minutes; hence high test timeout TEST_TIMEOUT_SEC = 600 + def eventuallyEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyNotEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertNotEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyTrue(self, f, m, retries=60, interval=2): + while True: + try: + self.assertTrue(f(), m) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + @classmethod @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def setUpClass(cls): @@ -32,30 +81,67 @@ class EndToEndTestCase(unittest.TestCase): In the case of test failure the cluster will stay to enable manual examination; next invocation of "make test" will re-create it. ''' + print("Test Setup being executed") # set a single K8s wrapper for all tests k8s = cls.k8s = K8s() + # remove existing local storage class and create hostpath class + try: + k8s.api.storage_v1_api.delete_storage_class("standard") + except ApiException as e: + print("Failed to delete the 'standard' storage class: {0}".format(e)) + # operator deploys pod service account there on start up # needed for test_multi_namespace_support() - cls.namespace = "test" - v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace)) - k8s.api.core_v1.create_namespace(v1_namespace) + cls.test_namespace = "test" + try: + v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace)) + k8s.api.core_v1.create_namespace(v1_namespace) + except ApiException as e: + print("Failed to create the '{0}' namespace: {1}".format(cls.test_namespace, e)) # submit the most recent operator image built on the Docker host with open("manifests/postgres-operator.yaml", 'r+') as f: operator_deployment = yaml.safe_load(f) operator_deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ['OPERATOR_IMAGE'] + + with open("manifests/postgres-operator.yaml", 'w') as f: yaml.dump(operator_deployment, f, Dumper=yaml.Dumper) + with open("manifests/configmap.yaml", 'r+') as f: + configmap = yaml.safe_load(f) + configmap["data"]["workers"] = "1" + configmap["data"]["docker_image"] = SPILO_CURRENT + + with open("manifests/configmap.yaml", 'w') as f: + yaml.dump(configmap, f, Dumper=yaml.Dumper) + for filename in ["operator-service-account-rbac.yaml", + "postgresql.crd.yaml", + "operatorconfiguration.crd.yaml", + "postgresteam.crd.yaml", "configmap.yaml", - "postgres-operator.yaml"]: + "postgres-operator.yaml", + "api-service.yaml", + "infrastructure-roles.yaml", + "infrastructure-roles-new.yaml", + "e2e-storage-class.yaml"]: result = k8s.create_with_kubectl("manifests/" + filename) print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) k8s.wait_for_operator_pod_start() + # reset taints and 
tolerations + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker", {"spec": {"taints": []}}) + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker2", {"spec": {"taints": []}}) + + # make sure we start a new operator on every new run, + # this tackles the problem when kind is reused + # and the Docker image is in fact changed (dirty one) + + k8s.update_config({}, step="TestSuite Startup") + actual_operator_image = k8s.api.core_v1.list_namespaced_pod( 'default', label_selector='name=postgres-operator').items[0].spec.containers[0].image print("Tested operator image: {}".format(actual_operator_image)) # shows up after tests finish @@ -69,6 +155,40 @@ class EndToEndTestCase(unittest.TestCase): print('Operator log: {}'.format(k8s.get_operator_log())) raise + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_overwrite_pooler_deployment(self): + self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml") + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1, + "Initial broken deplyment not rolled out") + + self.k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': True + } + }) + + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2, + "Operator did not succeed in overwriting labels") + + self.k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': False + } + }) + + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), + 0, "Pooler pods not scaled down") + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_enable_disable_connection_pooler(self): ''' @@ -78,182 +198,382 @@ class EndToEndTestCase(unittest.TestCase): the end turn connection pooler off to not interfere with other tests. 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_enable_disable_connection_pooler(self):
         '''
@@ -78,182 +198,382 @@ class EndToEndTestCase(unittest.TestCase):
         the end turn connection pooler off to not interfere with other tests.
         '''
         k8s = self.k8s
-        service_labels = {
-            'cluster-name': 'acid-minimal-cluster',
-        }
-        pod_labels = dict({
-            'connection-pooler': 'acid-minimal-cluster-pooler',
-        })
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'enableConnectionPooler': True,
+                    'enableReplicaConnectionPooler': True,
+                }
+            })
+
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2,
+                             "Deployment does not have 2 replicas by default")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(
+                             "connection-pooler=acid-minimal-cluster-pooler"),
+                             2, "No pooler pods found")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(
+                             "connection-pooler=acid-minimal-cluster-pooler-repl"),
+                             2, "No pooler replica pods found")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label(
+                             'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             2, "No pooler services found")
+
+        # Turn off only the master connection pooler
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'enableConnectionPooler': False,
+                    'enableReplicaConnectionPooler': True,
+                }
+            })
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler-repl"), 2,
+                             "Replica pooler deployment does not have 2 replicas by default")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(
+                             "connection-pooler=acid-minimal-cluster-pooler"),
+                             0, "Master pooler pods not deleted")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(
+                             "connection-pooler=acid-minimal-cluster-pooler-repl"),
+                             2, "Pooler replica pods not found")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label(
+                             'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "No pooler service found")
+
+        # Turn off only the replica connection pooler
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'enableConnectionPooler': True,
+                    'enableReplicaConnectionPooler': False,
+                }
+            })
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2,
+                             "Deployment does not have 2 replicas by default")
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
+                             2, "Master pooler pods not found")
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler-repl"),
+                             0, "Pooler replica pods not deleted")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "No pooler service found")
+
+        # scale up the connection pooler deployment
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'connectionPooler': {
+                        'numberOfInstances': 3,
+                    },
+                }
+            })
+
+        self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3,
+                             "Deployment replicas not scaled to 3")
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
+                             3, "Scale up of pooler pods does not work")
+
+        # turn it off; the kept connectionPooler config section should be
+        # overridden by the false flags
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'enableConnectionPooler': False,
+                    'enableReplicaConnectionPooler': False,
+                }
+            })
+
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
+                             0, "Pooler pods not scaled down")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             0, "Pooler service not removed")
+
+        # Verify that all the databases have the pooler schema installed.
+        # Do this via psql, since otherwise we need to deal with
+        # credentials.
+        dbList = []
+
+        leader = k8s.get_cluster_leader_pod('acid-minimal-cluster')
+        dbListQuery = "select datname from pg_database"
+        schemasQuery = """
+            select schema_name
+            from information_schema.schemata
+            where schema_name = 'pooler'
+        """
+        exec_query = r"psql -tAq -c \"{}\" -d {}"
+
+        if leader:
+            try:
+                q = exec_query.format(dbListQuery, "postgres")
+                q = "su postgres -c \"{}\"".format(q)
+                print('Get databases: {}'.format(q))
+                result = k8s.exec_with_kubectl(leader.metadata.name, q)
+                dbList = clean_list(result.stdout.split(b'\n'))
+                print('dbList: {}, stdout: {}, stderr {}'.format(
+                    dbList, result.stdout, result.stderr
+                ))
+            except Exception as ex:
+                print('Could not get databases: {}'.format(ex))
+                print('Stdout: {}'.format(result.stdout))
+                print('Stderr: {}'.format(result.stderr))
+
+            for db in dbList:
+                if db in ('template0', 'template1'):
+                    continue
+
+                schemas = []
+                try:
+                    q = exec_query.format(schemasQuery, db)
+                    q = "su postgres -c \"{}\"".format(q)
+                    print('Get schemas: {}'.format(q))
+                    result = k8s.exec_with_kubectl(leader.metadata.name, q)
+                    schemas = clean_list(result.stdout.split(b'\n'))
+                    print('schemas: {}, stdout: {}, stderr {}'.format(
+                        schemas, result.stdout, result.stderr
+                    ))
+                except Exception as ex:
+                    print('Could not get schemas: {}'.format(ex))
+                    print('Stdout: {}'.format(result.stdout))
+                    print('Stderr: {}'.format(result.stderr))
+
+                self.assertNotEqual(len(schemas), 0)
+        else:
+            print('Could not find leader pod')
+
+        # remove the config section to make the test work next time
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'connectionPooler': None,
+                    'enableReplicaConnectionPooler': False,
+                }
+            })
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_enable_load_balancer(self):
+        '''
+        Test if services are updated when enabling/disabling load balancers in the Postgres manifest
+        '''
+
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}'
+
+        self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
+                             'ClusterIP',
+                             "Expected ClusterIP service type initially, found {}")
 
         try:
-            # enable connection pooler
+            # enable load balancer services
+            pg_patch_enable_lbs = {
+                "spec": {
+                    "enableMasterLoadBalancer": True,
+                    "enableReplicaLoadBalancer": True
+                }
+            }
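+            # A rough sketch of the effect being tested (not asserted
+            # verbatim): the operator reconciles each cluster Service, so the
+            # master Service's spec flips between
+            #     type: ClusterIP   <->   type: LoadBalancer
+            # whenever enableMasterLoadBalancer is toggled (likewise for the
+            # replica Service).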
"postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - pods = k8s.api.core_v1.list_namespaced_pod( - 'default', label_selector=pod_selector - ).items + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") - self.assertTrue(pods, 'No connection pooler pods') + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("replica")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") - k8s.wait_for_service(service_selector) - services = k8s.api.core_v1.list_namespaced_service( - 'default', label_selector=service_selector - ).items - services = [ - s for s in services - if s.metadata.name.endswith('pooler') - ] - - self.assertTrue(services, 'No connection pooler service') - - # scale up connection pooler deployment + # disable load balancer services again + pg_patch_disable_lbs = { + "spec": { + "enableMasterLoadBalancer": False, + "enableReplicaLoadBalancer": False + } + } k8s.api.custom_objects_api.patch_namespaced_custom_object( - 'acid.zalan.do', 'v1', 'default', - 'postgresqls', 'acid-minimal-cluster', - { - 'spec': { - 'connectionPooler': { - 'numberOfInstances': 2, - }, - } - }) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - k8s.wait_for_running_pods(pod_selector, 2) + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") - # turn it off, keeping configuration section - k8s.api.custom_objects_api.patch_namespaced_custom_object( - 'acid.zalan.do', 'v1', 'default', - 'postgresqls', 'acid-minimal-cluster', - { - 'spec': { - 'enableConnectionPooler': False, - } - }) - k8s.wait_for_pods_to_stop(pod_selector) + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("replica")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_enable_load_balancer(self): + def test_infrastructure_roles(self): ''' - Test if services are updated when enabling/disabling load balancers + Test using external secrets for infrastructure roles ''' - k8s = self.k8s - cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' - - # enable load balancer services - pg_patch_enable_lbs = { - "spec": { - "enableMasterLoadBalancer": True, - "enableReplicaLoadBalancer": True - } + # update infrastructure roles description + secret_name = "postgresql-infrastructure-roles" + roles = "secretname: postgresql-infrastructure-roles-new, userkey: user,"\ + "rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon" + patch_infrastructure_roles = { + "data": { + "infrastructure_roles_secret_name": secret_name, + "infrastructure_roles_secrets": roles, + }, } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - # wait for service recreation - time.sleep(60) + k8s.update_config(patch_infrastructure_roles) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, + "Operator does not get in sync") - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for master, found 
{}".format(master_svc_type)) + try: + # check that new roles are represented in the config by requesting the + # operator configuration via API - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) + def verify_role(): + try: + operator_pod = k8s.get_operator_pod() + get_config_cmd = "wget --quiet -O - localhost:8080/config" + result = k8s.exec_with_kubectl(operator_pod.metadata.name, + get_config_cmd) + try: + roles_dict = (json.loads(result.stdout) + .get("controller", {}) + .get("InfrastructureRoles")) + except: + return False - # disable load balancer services again - pg_patch_disable_lbs = { - "spec": { - "enableMasterLoadBalancer": False, - "enableReplicaLoadBalancer": False - } - } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - # wait for service recreation - time.sleep(60) + if "robot_zmon_acid_monitoring_new" in roles_dict: + role = roles_dict["robot_zmon_acid_monitoring_new"] + role.pop("Password", None) + self.assertDictEqual(role, { + "Name": "robot_zmon_acid_monitoring_new", + "Flags": None, + "MemberOf": ["robot_zmon"], + "Parameters": None, + "AdminRole": "", + "Origin": 2, + }) + return True + except: + pass - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'ClusterIP', - "Expected ClusterIP service type for master, found {}".format(master_svc_type)) + return False - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'ClusterIP', - "Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) + self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_lazy_spilo_upgrade(self): ''' - Test lazy upgrade for the Spilo image: operator changes a stateful set but lets pods run with the old image - until they are recreated for reasons other than operator's activity. That works because the operator configures - stateful sets to use "onDelete" pod update policy. + Test lazy upgrade for the Spilo image: operator changes a stateful set + but lets pods run with the old image until they are recreated for + reasons other than operator's activity. That works because the operator + configures stateful sets to use "onDelete" pod update policy. 
The test covers: 1) enabling lazy upgrade in existing operator deployment - 2) forcing the normal rolling upgrade by changing the operator configmap and restarting its pod + 2) forcing the normal rolling upgrade by changing the operator + configmap and restarting its pod ''' k8s = self.k8s + pod0 = 'acid-minimal-cluster-0' + pod1 = 'acid-minimal-cluster-1' + + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") + + patch_lazy_spilo_upgrade = { + "data": { + "docker_image": SPILO_CURRENT, + "enable_lazy_spilo_upgrade": "false" + } + } + k8s.update_config(patch_lazy_spilo_upgrade, + step="Init baseline image version") + + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), SPILO_CURRENT, + "Statefulset not updated initially") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") + + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + SPILO_CURRENT, "Rolling upgrade was not executed") + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), + SPILO_CURRENT, "Rolling upgrade was not executed") + # update docker image in config and enable the lazy upgrade - conf_image = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114" + conf_image = SPILO_LAZY patch_lazy_spilo_upgrade = { "data": { "docker_image": conf_image, "enable_lazy_spilo_upgrade": "true" } } - k8s.update_config(patch_lazy_spilo_upgrade) + k8s.update_config(patch_lazy_spilo_upgrade, + step="patch image and lazy upgrade") + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), conf_image, + "Statefulset not updated to next Docker image") - pod0 = 'acid-minimal-cluster-0' - pod1 = 'acid-minimal-cluster-1' + try: + # restart the pod to get a container with the new image + k8s.api.core_v1.delete_namespaced_pod(pod0, 'default') - # restart the pod to get a container with the new image - k8s.api.core_v1.delete_namespaced_pod(pod0, 'default') - time.sleep(60) + # verify only pod-0 which was deleted got new image from statefulset + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + conf_image, "Delete pod-0 did not get new spilo image") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No two pods running after lazy rolling upgrade") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") + self.assertNotEqual(lambda: k8s.get_effective_pod_image(pod1), + SPILO_CURRENT, + "pod-1 should not have change Docker image to {}".format(SPILO_CURRENT)) - # lazy update works if the restarted pod and older pods run different Spilo versions - new_image = k8s.get_effective_pod_image(pod0) - old_image = k8s.get_effective_pod_image(pod1) - self.assertNotEqual(new_image, old_image, "Lazy updated failed: pods have the same image {}".format(new_image)) - - # sanity check - assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image) - self.assertEqual(new_image, conf_image, assert_msg) - - # clean up - unpatch_lazy_spilo_upgrade = { - "data": { - "enable_lazy_spilo_upgrade": "false", + # clean up + unpatch_lazy_spilo_upgrade = { + "data": { + "enable_lazy_spilo_upgrade": "false", + } } - } - k8s.update_config(unpatch_lazy_spilo_upgrade) + k8s.update_config(unpatch_lazy_spilo_upgrade, step="patch lazy 
upgrade") - # at this point operator will complete the normal rolling upgrade - # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works + # at this point operator will complete the normal rolling upgrade + # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + conf_image, "Rolling upgrade was not executed", + 50, 3) + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), + conf_image, "Rolling upgrade was not executed", + 50, 3) + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") - # XXX there is no easy way to wait until the end of Sync() - time.sleep(60) - - image0 = k8s.get_effective_pod_image(pod0) - image1 = k8s.get_effective_pod_image(pod1) - - assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1) - self.assertEqual(image0, image1, assert_msg) + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_logical_backup_cron_job(self): @@ -279,45 +599,51 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup) - k8s.wait_for_logical_backup_job_creation() - jobs = k8s.get_logical_backup_job().items - self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs))) + try: + self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 1, "failed to create logical backup job") - job = jobs[0] - self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster", - "Expected job name {}, found {}" - .format("logical-backup-acid-minimal-cluster", job.metadata.name)) - self.assertEqual(job.spec.schedule, schedule, - "Expected {} schedule, found {}" - .format(schedule, job.spec.schedule)) + job = k8s.get_logical_backup_job().items[0] + self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster", + "Expected job name {}, found {}" + .format("logical-backup-acid-minimal-cluster", job.metadata.name)) + self.assertEqual(job.spec.schedule, schedule, + "Expected {} schedule, found {}" + .format(schedule, job.spec.schedule)) - # update the cluster-wide image of the logical backup pod - image = "test-image-name" - patch_logical_backup_image = { - "data": { - "logical_backup_docker_image": image, + # update the cluster-wide image of the logical backup pod + image = "test-image-name" + patch_logical_backup_image = { + "data": { + "logical_backup_docker_image": image, + } } - } - k8s.update_config(patch_logical_backup_image) + k8s.update_config(patch_logical_backup_image, step="patch logical backup image") - jobs = k8s.get_logical_backup_job().items - actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image - self.assertEqual(actual_image, image, - "Expected job image {}, found {}".format(image, actual_image)) + def get_docker_image(): + jobs = k8s.get_logical_backup_job().items + return jobs[0].spec.job_template.spec.template.spec.containers[0].image - # delete the logical backup cron job - pg_patch_disable_backup = { - "spec": { - "enableLogicalBackup": False, + self.eventuallyEqual(get_docker_image, image, + "Expected job image {}, found {}".format(image, "{}")) + + # delete the logical 
+
+            # delete the logical backup cron job
+            pg_patch_disable_backup = {
+                "spec": {
+                    "enableLogicalBackup": False,
+                }
             }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
-        k8s.wait_for_logical_backup_job_deletion()
-        jobs = k8s.get_logical_backup_job().items
-        self.assertEqual(0, len(jobs),
-                         "Expected 0 logical backup jobs, found {}".format(len(jobs)))
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
+
+            self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 0, "failed to delete the logical backup job")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+        # ensure the cluster is healthy after the test
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_min_resource_limits(self):
@@ -325,20 +651,18 @@ class EndToEndTestCase(unittest.TestCase):
         Lower resource limits below configured minimum and let operator fix it
         '''
         k8s = self.k8s
-        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
-        labels = 'spilo-role=master,' + cluster_label
-        _, failover_targets = k8s.get_pg_nodes(cluster_label)
+        # self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Cluster not healthy at start")
 
         # configure minimum boundaries for CPU and memory limits
-        minCPULimit = '500m'
-        minMemoryLimit = '500Mi'
+        minCPULimit = '503m'
+        minMemoryLimit = '502Mi'
+
        patch_min_resource_limits = {
            "data": {
                "min_cpu_limit": minCPULimit,
                "min_memory_limit": minMemoryLimit
            }
        }
-        k8s.update_config(patch_min_resource_limits)
 
        # lower resource limits below minimum
        pg_patch_resources = {
@@ -357,20 +681,31 @@ class EndToEndTestCase(unittest.TestCase):
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
-        k8s.wait_for_pod_failover(failover_targets, labels)
-        k8s.wait_for_pod_start('spilo-role=replica')
 
-        pods = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector=labels).items
-        self.assert_master_is_unique()
-        masterPod = pods[0]
+        k8s.patch_statefulset({"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}})
+        k8s.update_config(patch_min_resource_limits, "Minimum resource test")
 
-        self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
-                         "Expected CPU limit {}, found {}"
-                         .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
-        self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
-                         "Expected memory limit {}, found {}"
-                         .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Expected 2 running pods after the config change, found {}")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")
+
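+        # The helper below spells out the expected end state: every spilo
+        # container must carry exactly the configured minima once the operator
+        # has fixed the limits; it returns False while any pod still differs.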
+        def verify_pod_limits():
+            pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
+            if len(pods) < 2:
+                return False
+
+            r = pods[0].spec.containers[0].resources.limits['memory'] == minMemoryLimit
+            r = r and pods[0].spec.containers[0].resources.limits['cpu'] == minCPULimit
+            r = r and pods[1].spec.containers[0].resources.limits['memory'] == minMemoryLimit
+            r = r and pods[1].spec.containers[0].resources.limits['cpu'] == minCPULimit
+            return r
+
+        self.eventuallyTrue(verify_pod_limits, "Pod limits were not adjusted")
+
+    @classmethod
+    def setUp(cls):
+        # cls.k8s.update_config({}, step="Setup")
+        cls.k8s.patch_statefulset({"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}})
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
@@ -381,15 +716,28 @@ class EndToEndTestCase(unittest.TestCase):
 
         with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
             pg_manifest = yaml.safe_load(f)
-            pg_manifest["metadata"]["namespace"] = self.namespace
+            pg_manifest["metadata"]["namespace"] = self.test_namespace
             yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
 
-        k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-        k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-        self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+        try:
+            k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
+            k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+        finally:
+            # delete the new cluster so that k8s_api.get_operator_state works correctly in subsequent tests
+            # ideally we should delete the 'test' namespace here, but the pods
+            # inside the namespace get stuck in the Terminating state, making the test time out
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
+            time.sleep(5)
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_node_readiness_label(self):
+    def test_zz_node_readiness_label(self):
         '''
         Remove node readiness label from master node. This must cause a failover.
''' @@ -398,40 +746,43 @@ class EndToEndTestCase(unittest.TestCase): readiness_label = 'lifecycle-status' readiness_value = 'ready' - # get nodes of master and replica(s) (expected target of new master) - current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label) - num_replicas = len(current_replica_nodes) - failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes) + try: + # get nodes of master and replica(s) (expected target of new master) + current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label) + num_replicas = len(current_replica_nodes) + failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes) - # add node_readiness_label to potential failover nodes - patch_readiness_label = { - "metadata": { - "labels": { - readiness_label: readiness_value + # add node_readiness_label to potential failover nodes + patch_readiness_label = { + "metadata": { + "labels": { + readiness_label: readiness_value + } } } - } - for failover_target in failover_targets: - k8s.api.core_v1.patch_node(failover_target, patch_readiness_label) + self.assertTrue(len(failover_targets) > 0, "No failover targets available") + for failover_target in failover_targets: + k8s.api.core_v1.patch_node(failover_target, patch_readiness_label) - # define node_readiness_label in config map which should trigger a failover of the master - patch_readiness_label_config = { - "data": { - "node_readiness_label": readiness_label + ':' + readiness_value, + # define node_readiness_label in config map which should trigger a failover of the master + patch_readiness_label_config = { + "data": { + "node_readiness_label": readiness_label + ':' + readiness_value, + } } - } - k8s.update_config(patch_readiness_label_config) - new_master_node, new_replica_nodes = self.assert_failover( - current_master_node, num_replicas, failover_targets, cluster_label) + k8s.update_config(patch_readiness_label_config, "setting readiness label") + new_master_node, new_replica_nodes = self.assert_failover( + current_master_node, num_replicas, failover_targets, cluster_label) - # patch also node where master ran before - k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label) + # patch also node where master ran before + k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label) - # wait a little before proceeding with the pod distribution test - time.sleep(30) + # toggle pod anti affinity to move replica away from master node + self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed") - # toggle pod anti affinity to move replica away from master node - self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_scaling(self): @@ -439,20 +790,20 @@ class EndToEndTestCase(unittest.TestCase): Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. 
        '''
        k8s = self.k8s
-        labels = "application=spilo,cluster-name=acid-minimal-cluster"
+        pod = "acid-minimal-cluster-0"
 
-        k8s.wait_for_pg_to_scale(3)
-        self.assertEqual(3, k8s.count_pods_with_label(labels))
-        self.assert_master_is_unique()
+        k8s.scale_cluster(3)
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 members are healthy")
 
-        k8s.wait_for_pg_to_scale(2)
-        self.assertEqual(2, k8s.count_pods_with_label(labels))
-        self.assert_master_is_unique()
+        k8s.scale_cluster(2)
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all 2 members are healthy")
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_service_annotations(self):
         '''
-        Create a Postgres cluster with service annotations and check them. 
+        Create a Postgres cluster with service annotations and check them.
         '''
         k8s = self.k8s
         patch_custom_service_annotations = {
@@ -466,23 +817,21 @@ class EndToEndTestCase(unittest.TestCase):
            "spec": {
                "serviceAnnotations": {
                    "annotation.key": "value",
-                    "foo": "bar",
+                    "alice": "bob",
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
-        # wait a little before proceeding
-        time.sleep(30)
        annotations = {
            "annotation.key": "value",
            "foo": "bar",
+            "alice": "bob"
        }
-        self.assertTrue(k8s.check_service_annotations(
-            "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
-        self.assertTrue(k8s.check_service_annotations(
-            "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
+
+        self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations")
+        self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations")
 
        # clean up
        unpatch_custom_service_annotations = {
@@ -503,6 +852,7 @@ class EndToEndTestCase(unittest.TestCase):
        patch_sset_propagate_annotations = {
            "data": {
                "downscaler_annotations": "deployment-time,downscaler/*",
+                "inherited_annotations": "owned-by",
            }
        }
        k8s.update_config(patch_sset_propagate_annotations)
@@ -512,32 +862,39 @@ class EndToEndTestCase(unittest.TestCase):
                "annotations": {
                    "deployment-time": "2020-04-30 12:00:00",
                    "downscaler/downtime_replicas": "0",
+                    "owned-by": "acid",
                },
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
-        # wait a little before proceeding
-        time.sleep(60)
        annotations = {
            "deployment-time": "2020-04-30 12:00:00",
            "downscaler/downtime_replicas": "0",
+            "owned-by": "acid",
        }
-        self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
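+    # Editor's note: the zz/zzz/zzzz prefixes below exploit unittest's
+    # alphabetical test ordering, so the disruptive scenarios (readiness
+    # label, taints, cluster deletion) run after the cheaper tests.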
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_taint_based_eviction(self):
+    @unittest.skip("Skipping this test until fixed")
+    def test_zzz_taint_based_eviction(self):
         '''
         Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
         '''
         k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
 
+        # verify that we are in a good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Expected 2 running pods, found {}")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
+
         # get nodes of master and replica(s) (expected target of new master)
-        current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
-        num_replicas = len(current_replica_nodes)
-        failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
 
         # taint node with postgres=:NoExecute to force failover
         body = {
@@ -551,10 +908,9 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
 
-        # patch node and test if master is failing over to one of the expected nodes
-        k8s.api.core_v1.patch_node(current_master_node, body)
-        new_master_node, new_replica_nodes = self.assert_failover(
-            current_master_node, num_replicas, failover_targets, cluster_label)
+        # patch the node and test if the master fails over to one of the replica nodes
+        k8s.api.core_v1.patch_node(master_nodes[0], body)
+        self.eventuallyNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes,
+                                "Master pod did not leave the tainted node, still on {}")
+        self.eventuallyTrue(lambda: set(k8s.get_cluster_nodes()[0]).issubset(set(replica_nodes)),
+                            "Master pod was not rescheduled to one of the replica nodes")
 
         # add toleration to pods
         patch_toleration_config = {
@@ -562,25 +918,220 @@ class EndToEndTestCase(unittest.TestCase):
                 "toleration": "key:postgres,operator:Exists,effect:NoExecute"
             }
         }
-        k8s.update_config(patch_toleration_config)
 
-        # wait a little before proceeding with the pod distribution test
-        time.sleep(30)
+        k8s.update_config(patch_toleration_config, step="allow tainted nodes")
+
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Expected 2 running pods, found {}")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
 
         # toggle pod anti affinity to move replica away from master node
+        nm, new_replica_nodes = k8s.get_cluster_nodes()
+        new_master_node = nm[0]
         self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_node_affinity(self):
+        '''
+        Add a label to a node and update the postgres cluster spec to deploy only on nodes with that label
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        # verify that we are in a good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Expected 2 running pods, found {}")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
+        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        # get nodes of master and replica(s)
+        master_node, replica_nodes = k8s.get_pg_nodes(cluster_label)
+
+        self.assertNotEqual(master_node, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # label the node with node-affinity-test=postgres
+        node_label_body = {
+            "metadata": {
+                "labels": {
+                    "node-affinity-test": "postgres"
+                }
+            }
+        }
+
+        try:
+            # patch the current master node with the label
+            print('patching master node: {}'.format(master_node))
+            k8s.api.core_v1.patch_node(master_node, node_label_body)
+
+            # add node affinity to the cluster
+            patch_node_affinity_config = {
+                "spec": {
+                    "nodeAffinity": {
+                        "requiredDuringSchedulingIgnoredDuringExecution": {
+
"nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "node-affinity-test", + "operator": "In", + "values": [ + "postgres" + ] + } + ] + } + ] + } + } + } + } + + k8s.api.custom_objects_api.patch_namespaced_custom_object( + group="acid.zalan.do", + version="v1", + namespace="default", + plural="postgresqls", + name="acid-minimal-cluster", + body=patch_node_affinity_config) + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement + k8s.wait_for_pod_failover(master_node, 'spilo-role=replica,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'replica': + self.assertEqual(master_node, pod.spec.node_name, + "Sanity check: expected replica to relocate to master node {}, but found on {}".format(master_node, pod.spec.node_name)) + + # check that pod has correct node affinity + key = pod.spec.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms[0].match_expressions[0].key + value = pod.spec.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms[0].match_expressions[0].values[0] + self.assertEqual("node-affinity-test", key, + "Sanity check: expect node selector key to be equal to 'node-affinity-test' but got {}".format(key)) + self.assertEqual("postgres", value, + "Sanity check: expect node selector value to be equal to 'postgres' but got {}".format(value)) + + patch_node_remove_affinity_config = { + "spec": { + "nodeAffinity" : None + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + group="acid.zalan.do", + version="v1", + namespace="default", + plural="postgresqls", + name="acid-minimal-cluster", + body=patch_node_remove_affinity_config) + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # remove node affinity to move replica away from master node + nm, new_replica_nodes = k8s.get_cluster_nodes() + new_master_node = nm[0] + self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_zzzz_cluster_deletion(self): + ''' + Test deletion with configured protection + ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + + # configure delete protection + patch_delete_annotations = { + "data": { + "delete_annotation_date_key": "delete-date", + "delete_annotation_name_key": "delete-clustername" + } + } + k8s.update_config(patch_delete_annotations) + time.sleep(25) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + try: + # this delete attempt should be omitted because of missing annotations + k8s.api.custom_objects_api.delete_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") + time.sleep(15) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # check that pods and services are still there + k8s.wait_for_running_pods(cluster_label, 2) + k8s.wait_for_service(cluster_label) + + 
# recreate Postgres cluster resource + k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") + + # wait a little before proceeding + time.sleep(10) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # add annotations to manifest + delete_date = datetime.today().strftime('%Y-%m-%d') + pg_patch_delete_annotations = { + "metadata": { + "annotations": { + "delete-date": delete_date, + "delete-clustername": "acid-minimal-cluster", + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # wait a little before proceeding + time.sleep(20) + k8s.wait_for_running_pods(cluster_label, 2) + k8s.wait_for_service(cluster_label) + + # now delete process should be triggered + k8s.api.custom_objects_api.delete_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") + + self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, "Manifest not deleted") + + # check if everything has been deleted + self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted") + self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Service not deleted") + self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted") + self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") + self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") + self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets not deleted") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + # reset configmap + patch_delete_annotations = { + "data": { + "delete_annotation_date_key": "", + "delete_annotation_name_key": "" + } + } + k8s.update_config(patch_delete_annotations) + def get_failover_targets(self, master_node, replica_nodes): ''' If all pods live on the same node, failover will happen to other worker(s) ''' k8s = self.k8s + k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane' failover_targets = [x for x in replica_nodes if x != master_node] if len(failover_targets) == 0: - nodes = k8s.api.core_v1.list_node() + nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion) for n in nodes.items: - if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node: + if n.metadata.name != master_node: failover_targets.append(n.metadata.name) return failover_targets @@ -627,9 +1178,8 @@ class EndToEndTestCase(unittest.TestCase): "enable_pod_antiaffinity": "true" } } - k8s.update_config(patch_enable_antiaffinity) - self.assert_failover( - master_node, len(replica_nodes), failover_targets, cluster_label) + k8s.update_config(patch_enable_antiaffinity, "enable antiaffinity") + self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label) # now disable pod anti affintiy again which 
will cause yet another failover patch_disable_antiaffinity = { @@ -637,198 +1187,11 @@ class EndToEndTestCase(unittest.TestCase): "enable_pod_antiaffinity": "false" } } - k8s.update_config(patch_disable_antiaffinity) + k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity") k8s.wait_for_pod_start('spilo-role=master') k8s.wait_for_pod_start('spilo-role=replica') - - -class K8sApi: - - def __init__(self): - - # https://github.com/kubernetes-client/python/issues/309 - warnings.simplefilter("ignore", ResourceWarning) - - self.config = config.load_kube_config() - self.k8s_client = client.ApiClient() - - self.core_v1 = client.CoreV1Api() - self.apps_v1 = client.AppsV1Api() - self.batch_v1_beta1 = client.BatchV1beta1Api() - self.custom_objects_api = client.CustomObjectsApi() - - -class K8s: - ''' - Wraps around K8 api client and helper methods. - ''' - - RETRY_TIMEOUT_SEC = 10 - - def __init__(self): - self.api = K8sApi() - - def get_pg_nodes(self, pg_cluster_name, namespace='default'): - master_pod_node = '' - replica_pod_nodes = [] - podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) - for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': - master_pod_node = pod.spec.node_name - elif pod.metadata.labels.get('spilo-role') == 'replica': - replica_pod_nodes.append(pod.spec.node_name) - - return master_pod_node, replica_pod_nodes - - def wait_for_operator_pod_start(self): - self. wait_for_pod_start("name=postgres-operator") - # HACK operator must register CRD and/or Sync existing PG clusters after start up - # for local execution ~ 10 seconds suffices - time.sleep(60) - - def get_operator_pod(self): - pods = self.api.core_v1.list_namespaced_pod( - 'default', label_selector='name=postgres-operator' - ).items - - if pods: - return pods[0] - - return None - - def get_operator_log(self): - operator_pod = self.get_operator_pod() - pod_name = operator_pod.metadata.name - return self.api.core_v1.read_namespaced_pod_log( - name=pod_name, - namespace='default' - ) - - def wait_for_pod_start(self, pod_labels, namespace='default'): - pod_phase = 'No pod running' - while pod_phase != 'Running': - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items - if pods: - pod_phase = pods[0].status.phase - - if pods and pod_phase != 'Running': - pod_name = pods[0].metadata.name - response = self.api.core_v1.read_namespaced_pod( - name=pod_name, - namespace=namespace - ) - print("Pod description {}".format(response)) - - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_service_type(self, svc_labels, namespace='default'): - svc_type = '' - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - svc_type = svc.spec.type - return svc_type - - def check_service_annotations(self, svc_labels, annotations, namespace='default'): - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - for key, value in annotations.items(): - if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotation)) - return False return True - def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): - ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items - for sset in ssets: - for key, value in annotations.items(): - if 
key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotation)) - return False - return True - - def wait_for_pg_to_scale(self, number_of_instances, namespace='default'): - - body = { - "spec": { - "numberOfInstances": number_of_instances - } - } - _ = self.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) - - labels = 'application=spilo,cluster-name=acid-minimal-cluster' - while self.count_pods_with_label(labels) != number_of_instances: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_running_pods(self, labels, number, namespace=''): - while self.count_pods_with_label(labels) != number: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_pods_to_stop(self, labels, namespace=''): - while self.count_pods_with_label(labels) != 0: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_service(self, labels, namespace='default'): - def get_services(): - return self.api.core_v1.list_namespaced_service( - namespace, label_selector=labels - ).items - - while not get_services(): - time.sleep(self.RETRY_TIMEOUT_SEC) - - def count_pods_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) - - def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): - pod_phase = 'Failing over' - new_pod_node = '' - - while (pod_phase != 'Running') or (new_pod_node not in failover_targets): - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - if pods: - new_pod_node = pods[0].spec.node_name - pod_phase = pods[0].status.phase - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_logical_backup_job(self, namespace='default'): - return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") - - def wait_for_logical_backup_job(self, expected_num_of_jobs): - while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_logical_backup_job_deletion(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=0) - - def wait_for_logical_backup_job_creation(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=1) - - def delete_operator_pod(self): - operator_pod = self.api.core_v1.list_namespaced_pod( - 'default', label_selector="name=postgres-operator").items[0].metadata.name - self.api.core_v1.delete_namespaced_pod(operator_pod, "default") # restart reloads the conf - self.wait_for_operator_pod_start() - - def update_config(self, config_map_patch): - self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) - self.delete_operator_pod() - - def create_with_kubectl(self, path): - return subprocess.run( - ["kubectl", "create", "-f", path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - def get_effective_pod_image(self, pod_name, namespace='default'): - ''' - Get the Spilo image pod currently uses. In case of lazy rolling updates - it may differ from the one specified in the stateful set. 
- ''' - pod = self.api.core_v1.list_namespaced_pod( - namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) - return pod.items[0].spec.containers[0].image - if __name__ == '__main__': unittest.main() diff --git a/go.mod b/go.mod index dc6389a1c..4e9c8a742 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,22 @@ module github.com/zalando/postgres-operator -go 1.14 +go 1.15 require ( - github.com/aws/aws-sdk-go v1.29.33 - github.com/emicklei/go-restful v2.9.6+incompatible // indirect - github.com/evanphx/json-patch v4.5.0+incompatible // indirect - github.com/googleapis/gnostic v0.3.0 // indirect - github.com/lib/pq v1.3.0 + github.com/aws/aws-sdk-go v1.36.3 + github.com/golang/mock v1.4.4 + github.com/lib/pq v1.9.0 github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d - github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a - github.com/sirupsen/logrus v1.5.0 - github.com/stretchr/testify v1.4.0 - golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b // indirect - gopkg.in/yaml.v2 v2.2.8 - k8s.io/api v0.18.2 - k8s.io/apiextensions-apiserver v0.18.2 - k8s.io/apimachinery v0.18.2 - k8s.io/client-go v11.0.0+incompatible - k8s.io/code-generator v0.18.2 - sigs.k8s.io/kind v0.5.1 // indirect + github.com/r3labs/diff v1.1.0 + github.com/sirupsen/logrus v1.7.0 + github.com/stretchr/testify v1.6.1 + golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c + golang.org/x/mod v0.4.0 // indirect + golang.org/x/tools v0.0.0-20201207204333-a835c872fcea // indirect + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.19.4 + k8s.io/apiextensions-apiserver v0.19.3 + k8s.io/apimachinery v0.19.4 + k8s.io/client-go v0.19.3 + k8s.io/code-generator v0.19.4 ) diff --git a/go.sum b/go.sum index 22be07f7a..d8df3fda4 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,33 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod 
h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -21,41 +37,57 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.29.33 h1:WP85+WHalTFQR2wYp5xR2sjiVAZXew2bBQXGU1QJBXI= -github.com/aws/aws-sdk-go v1.29.33/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.36.3 h1:KYpG5OegwW3xgOsMxy01nj/Td281yxi1Ha2lJQJs4tI= +github.com/aws/aws-sdk-go v1.36.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go 
v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= @@ -63,31 +95,35 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= @@ -105,9 +141,11 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -118,6 +156,7 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= github.com/go-openapi/strfmt v0.19.3/go.mod 
h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -127,53 +166,67 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 
h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -181,41 +234,43 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go 
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190620125010-da37f6c1e481/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= @@ -223,29 +278,32 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -255,90 +313,133 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc= -github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= +github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c h1:9HhBz5L/UjnK9XLtiZhYAdue5BVKep3PMmS2LuPDt8k= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -350,52 +451,74 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190621203818-d432491b9138/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= -golang.org/x/sys 
v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -404,40 +527,83 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201207204333-a835c872fcea h1:LgKM3cNs8xO6GK1ZVK0nasPn7IN39Sz9EBTwQLyishk= +golang.org/x/tools v0.0.0-20201207204333-a835c872fcea/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -446,57 +612,51 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= -k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso= -k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/kind v0.5.1 h1:BYnHEJ9DC+0Yjlyyehqd3xnKtEmFdLKU8QxqOqvQzdw= -sigs.k8s.io/kind v0.5.1/go.mod h1:L+Kcoo83/D1+ryU5P2VFbvYm0oqbkJn9zTZq0KNxW68= -sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296 h1:iQaIG5Dq+3qSiaFrJ/l/0MjjxKmdwyVNpKRYJwUe/+0= -sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296/go.mod h1:ztX4zYc/QIww3gSripwF7TBOarBTm5BvyAMem0kCzOE= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e 
h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apiextensions-apiserver v0.19.3 h1:WZxBypSHW4SdXHbdPTS/Jy7L2la6Niggs8BuU5o+avo= +k8s.io/apiextensions-apiserver v0.19.3/go.mod h1:igVEkrE9TzInc1tYE7qSqxaLg/rEAp6B5+k9Q7+IC8Q= +k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.19.3/go.mod h1:bx6dMm+H6ifgKFpCQT/SAhPwhzoeIMlHIaibomUDec0= +k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg= +k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= +k8s.io/code-generator v0.19.3/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.19.4 h1:c8IL7RgTgJaYgr2bYMgjN0WikHnohbBhEgajfIkuP5I= +k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.3/go.mod h1:WhLWSIefQn8W8jxSLl5WNiR6z8oyMe/8Zywg7alOkRc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/kubectl-pg/cmd/check.go b/kubectl-pg/cmd/check.go index 266047cf0..4f88e7efa 100644 --- a/kubectl-pg/cmd/check.go +++ b/kubectl-pg/cmd/check.go @@ -24,19 +24,20 @@ package cmd import ( "fmt" + "log" + "github.com/spf13/cobra" postgresConstants "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "log" ) // checkCmd represent kubectl pg check. var checkCmd = &cobra.Command{ Use: "check", Short: "Checks the Postgres operator is installed in the k8s cluster", - Long: `Checks that the Postgres CRD is registered in a k8s cluster. + Long: `Checks that the Postgres CRD is registered in a k8s cluster. This means that the operator pod was able to start normally.`, Run: func(cmd *cobra.Command, args []string) { check() @@ -47,9 +48,9 @@ kubectl pg check } // check validates postgresql CRD registered or not. -func check() *v1beta1.CustomResourceDefinition { +func check() *v1.CustomResourceDefinition { config := getConfig() - apiExtClient, err := apiextbeta1.NewForConfig(config) + apiExtClient, err := apiextv1.NewForConfig(config) if err != nil { log.Fatal(err) } diff --git a/kubectl-pg/cmd/util.go b/kubectl-pg/cmd/util.go index 329f9a28f..a2a5c2073 100644 --- a/kubectl-pg/cmd/util.go +++ b/kubectl-pg/cmd/util.go @@ -25,6 +25,13 @@ package cmd import ( "flag" "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,12 +39,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/homedir" - "log" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" ) const ( @@ -88,7 +89,7 @@ func confirmAction(clusterName string, namespace string) { } clusterDetails := strings.Split(confirmClusterDetails, "/") if clusterDetails[0] != namespace || clusterDetails[1] != clusterName { - fmt.Printf("cluster name or namespace doesn't match. Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName) + fmt.Printf("cluster name or namespace does not match. 
Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName) } else { return } diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index e626d6b26..412bac29b 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -6,8 +6,10 @@ metadata: # environment: demo # annotations: # "acid.zalan.do/controller": "second-operator" +# "delete-date": "2020-08-31" # can only be deleted on that day if the "delete-date" key is configured +# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured spec: - dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p3 + dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 teamId: "acid" numberOfInstances: 2 users: # Application/Robot users @@ -16,7 +18,8 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false -# enableConnectionPooler: true # not needed when connectionPooler section is present (see below) + enableConnectionPooler: false # enable/disable connection pooler deployment + enableReplicaConnectionPooler: false # set to enable connectionPooler for replica service allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 databases: @@ -33,8 +36,8 @@ spec: defaultRoles: true defaultUsers: false postgresql: - version: "12" - parameters: # Expert section + version: "13" + parameters: # Expert section shared_buffers: "32MB" max_connections: "10" log_statement: "all" @@ -66,6 +69,8 @@ spec: # name: my-config-map enableShmVolume: true +# spiloRunAsUser: 101 +# spiloRunAsGroup: 103 # spiloFSGroup: 103 # podAnnotations: # annotation.key: value @@ -88,9 +93,9 @@ spec: encoding: "UTF8" locale: "en_US.UTF-8" data-checksums: "true" - pg_hba: - - hostssl all all 0.0.0.0/0 md5 - - host all all 0.0.0.0/0 md5 +# pg_hba: +# - hostssl all all 0.0.0.0/0 md5 +# - host all all 0.0.0.0/0 md5 # slots: # permanent_physical_1: # type: physical @@ -122,18 +127,19 @@ spec: # - 01:00-06:00 #UTC # - Sat:00:00-04:00 - connectionPooler: - numberOfInstances: 2 - mode: "transaction" - schema: "pooler" - user: "pooler" - resources: - requests: - cpu: 300m - memory: 100Mi - limits: - cpu: "1" - memory: 100Mi +# overwrite custom properties for connection pooler deployments +# connectionPooler: +# numberOfInstances: 2 +# mode: "transaction" +# schema: "pooler" +# user: "pooler" +# resources: +# requests: +# cpu: 300m +# memory: 100Mi +# limits: +# cpu: "1" +# memory: 100Mi initContainers: - name: date @@ -166,3 +172,14 @@ spec: # When TLS is enabled, also set spiloFSGroup parameter above to the relevant value. # if unknown, set it to 103 which is the usual value in the default spilo images. # In Openshift, there is no need to set spiloFSGroup/spilo_fsgroup. + +# Add node affinity support by allowing postgres pods to schedule only on nodes that +# have the label "postgres-operator:enabled" set. 
+# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: postgres-operator +# operator: In +# values: +# - enabled diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 4314b41d3..ce5d47b62 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -15,7 +15,7 @@ data: # connection_pooler_default_cpu_request: "500m" # connection_pooler_default_memory_limit: 100Mi # connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-12" # connection_pooler_max_db_connections: 60 # connection_pooler_mode: "transaction" # connection_pooler_number_of_instances: 2 @@ -29,28 +29,40 @@ data: # default_cpu_request: 100m # default_memory_limit: 500Mi # default_memory_request: 100Mi - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3 + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # downscaler_annotations: "deployment-time,downscaler/*" # enable_admin_role_for_users: "true" # enable_crd_validation: "true" # enable_database_access: "true" + enable_ebs_gp3_migration: "false" + # enable_ebs_gp3_migration_max_size: "1000" # enable_init_containers: "true" # enable_lazy_spilo_upgrade: "false" enable_master_load_balancer: "false" + enable_pgversion_env_var: "true" # enable_pod_antiaffinity: "false" # enable_pod_disruption_budget: "true" + # enable_postgres_team_crd: "false" + # enable_postgres_team_crd_superusers: "false" enable_replica_load_balancer: "false" # enable_shm_volume: "true" # enable_sidecars: "true" + enable_spilo_wal_path_compat: "true" # enable_team_superuser: "false" enable_teams_api: "false" # etcd_host: "" + external_traffic_policy: "Cluster" + # gcp_credentials: "" # kubernetes_use_configmaps: "false" - # infrastructure_roles_secret_name: postgresql-infrastructure-roles + # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole" + # inherited_annotations: owned-by # inherited_labels: application,environment # kube_iam_role: "" # log_s3_bucket: "" - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_region: "" @@ -73,8 +85,10 @@ data: # pod_antiaffinity_topology_key: "kubernetes.io/hostname" pod_deletion_wait_timeout: 10m # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" pod_label_wait_timeout: 10m pod_management_policy: "ordered_ready" + # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role # pod_service_account_definition: "" pod_service_account_name: "postgres-pod" @@ -94,12 +108,17 @@ data: secret_name_template: "{username}.{cluster}.credentials" # sidecar_docker_images: "" # set_memory_request_to_limit: "false" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 + # spilo_fsgroup: 103 spilo_privileged: "false" + storage_resize_mode: "pvc" super_username: postgres # team_admin_role: "admin" # team_api_role_configuration: "log_statement:all" # teams_api_url: http://fake-teams-api.default.svc.cluster.local # 
toleration: "" + # wal_gs_bucket: "" # wal_s3_bucket: "" watched_namespace: "*" # listen to all namespaces - workers: "4" + workers: "16" diff --git a/manifests/custom-team-membership.yaml b/manifests/custom-team-membership.yaml new file mode 100644 index 000000000..9af153962 --- /dev/null +++ b/manifests/custom-team-membership.yaml @@ -0,0 +1,13 @@ +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalSuperuserTeams: + acid: + - "postgres_superusers" + additionalTeams: + acid: [] + additionalMembers: + acid: + - "elephant" diff --git a/manifests/e2e-storage-class.yaml b/manifests/e2e-storage-class.yaml new file mode 100644 index 000000000..c8d941341 --- /dev/null +++ b/manifests/e2e-storage-class.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + namespace: kube-system + name: standard + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/host-path diff --git a/manifests/fake-teams-api.yaml b/manifests/fake-teams-api.yaml index 97d1b2a98..15f7c7576 100644 --- a/manifests/fake-teams-api.yaml +++ b/manifests/fake-teams-api.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: fake-teams-api diff --git a/manifests/infrastructure-roles-new.yaml b/manifests/infrastructure-roles-new.yaml new file mode 100644 index 000000000..64b854c6a --- /dev/null +++ b/manifests/infrastructure-roles-new.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +data: + # infrastructure role definition in the new format + # robot_zmon_acid_monitoring_new + user: cm9ib3Rfem1vbl9hY2lkX21vbml0b3JpbmdfbmV3 + # foobar_new + password: Zm9vYmFyX25ldw== +kind: Secret +metadata: + name: postgresql-infrastructure-roles-new + namespace: default +type: Opaque diff --git a/manifests/infrastructure-roles.yaml b/manifests/infrastructure-roles.yaml index 3c2d86850..c66d79139 100644 --- a/manifests/infrastructure-roles.yaml +++ b/manifests/infrastructure-roles.yaml @@ -7,12 +7,14 @@ data: # provide other options in the configmap. 
# robot_zmon_acid_monitoring user1: cm9ib3Rfem1vbl9hY2lkX21vbml0b3Jpbmc= + # foobar + password1: Zm9vYmFy # robot_zmon inrole1: cm9ib3Rfem1vbg== # testuser user2: dGVzdHVzZXI= - # foobar - password2: Zm9vYmFy + # testpassword + password2: dGVzdHBhc3N3b3Jk # user batman with the password justice # look for other fields in the infrastructure roles configmap batman: anVzdGljZQ== diff --git a/manifests/minimal-fake-pooler-deployment.yaml b/manifests/minimal-fake-pooler-deployment.yaml new file mode 100644 index 000000000..5ee8cf05f --- /dev/null +++ b/manifests/minimal-fake-pooler-deployment.yaml @@ -0,0 +1,35 @@ +# will not run but is good enough for tests to fail +apiVersion: apps/v1 +kind: Deployment +metadata: + name: acid-minimal-cluster-pooler + labels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler +spec: + replicas: 1 + selector: + matchLabels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler + cluster-name: acid-minimal-cluster + template: + metadata: + labels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler + cluster-name: acid-minimal-cluster + spec: + serviceAccountName: postgres-operator + containers: + - name: postgres-operator + image: registry.opensource.zalan.do/acid/pgbouncer:master-12 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 250Mi + limits: + cpu: 500m + memory: 500Mi + env: [] diff --git a/manifests/minimal-postgres-manifest.yaml b/manifests/minimal-postgres-manifest.yaml index 4dd6b7ee4..ff96e392b 100644 --- a/manifests/minimal-postgres-manifest.yaml +++ b/manifests/minimal-postgres-manifest.yaml @@ -18,4 +18,4 @@ spec: preparedDatabases: bar: {} postgresql: - version: "12" + version: "13" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 266df30c5..1ba5b4d23 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -26,6 +26,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io @@ -97,6 +106,8 @@ rules: - delete - get - list + - patch + - update # to read existing PVs. 
Creation should be done via dynamic provisioning - apiGroups: - "" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 23b5ff0fc..50405a8cc 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: operatorconfigurations.acid.zalan.do @@ -11,345 +11,441 @@ spec: singular: operatorconfiguration shortNames: - opconfig + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - configuration - properties: - kind: - type: string - enum: - - OperatorConfiguration - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - configuration: - type: object - properties: - docker_image: - type: string - enable_crd_validation: - type: boolean - enable_lazy_spilo_upgrade: - type: boolean - enable_shm_volume: - type: boolean - etcd_host: - type: string - kubernetes_use_configmaps: - type: boolean - max_instances: - type: integer - minimum: -1 # -1 = disabled - min_instances: - type: integer - minimum: -1 # -1 = disabled - resync_period: - type: string - repair_period: - type: string - set_memory_request_to_limit: - type: boolean - sidecar_docker_images: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: - type: object - additionalProperties: true - workers: - type: integer - minimum: 1 - users: - type: object - properties: - replication_username: - type: string - super_username: - type: string - kubernetes: - type: object - properties: - cluster_domain: - type: string - cluster_labels: - type: object - additionalProperties: - type: string - cluster_name_label: - type: string - custom_pod_annotations: - type: object - additionalProperties: - type: string - downscaler_annotations: - type: array - items: - type: string - enable_init_containers: - type: boolean - enable_pod_antiaffinity: - type: boolean - enable_pod_disruption_budget: - type: boolean - enable_sidecars: - type: boolean - infrastructure_roles_secret_name: - type: string - inherited_labels: - type: array - items: - type: string - master_pod_move_timeout: - type: string - node_readiness_label: - type: object - additionalProperties: - type: string - oauth_token_secret_name: - type: string - pdb_name_format: - type: string - pod_antiaffinity_topology_key: - type: string - pod_environment_configmap: - type: string - pod_management_policy: - type: string - enum: - - "ordered_ready" - - "parallel" - pod_priority_class_name: - type: string - pod_role_label: - type: string - pod_service_account_definition: - type: string - pod_service_account_name: - type: string - pod_service_account_role_binding_definition: - type: string - pod_terminate_grace_period: - type: string - secret_name_template: - type: string - spilo_fsgroup: - type: integer - spilo_privileged: - type: boolean - toleration: - type: object - additionalProperties: - type: string - watched_namespace: - type: string - postgres_pod_resources: - type: object - properties: - default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default_memory_request: - type: string - pattern: 
'^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - min_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - min_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - timeouts: - type: object - properties: - pod_label_wait_timeout: - type: string - pod_deletion_wait_timeout: - type: string - ready_wait_interval: - type: string - ready_wait_timeout: - type: string - resource_check_interval: - type: string - resource_check_timeout: - type: string - load_balancer: - type: object - properties: - custom_service_annotations: - type: object - additionalProperties: - type: string - db_hosted_zone: - type: string - enable_master_load_balancer: - type: boolean - enable_replica_load_balancer: - type: boolean - master_dns_name_format: - type: string - replica_dns_name_format: - type: string - aws_or_gcp: - type: object - properties: - additional_secret_mount: - type: string - additional_secret_mount_path: - type: string - aws_region: - type: string - kube_iam_role: - type: string - log_s3_bucket: - type: string - wal_s3_bucket: - type: string - logical_backup: - type: object - properties: - logical_backup_docker_image: - type: string - logical_backup_s3_access_key_id: - type: string - logical_backup_s3_bucket: - type: string - logical_backup_s3_endpoint: - type: string - logical_backup_s3_region: - type: string - logical_backup_s3_secret_access_key: - type: string - logical_backup_s3_sse: - type: string - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - debug: - type: object - properties: - debug_logging: - type: boolean - enable_database_access: - type: boolean - teams_api: - type: object - properties: - enable_admin_role_for_users: - type: boolean - enable_team_superuser: - type: boolean - enable_teams_api: - type: boolean - pam_configuration: - type: string - pam_role_name: - type: string - postgres_superuser_teams: - type: array - items: - type: string - protected_role_names: - type: array - items: - type: string - team_admin_role: - type: string - team_api_role_configuration: - type: object - additionalProperties: - type: string - teams_api_url: - type: string - logging_rest_api: - type: object - properties: - api_port: - type: integer - cluster_history_entries: - type: integer - ring_log_lines: - type: integer - scalyr: # deprecated - type: object - properties: - scalyr_api_key: - type: string - scalyr_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_image: - type: string - scalyr_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_server_url: - type: string - connection_pooler: - type: object - properties: - connection_pooler_schema: - type: string - #default: "pooler" - connection_pooler_user: - type: string - #default: "pooler" - connection_pooler_image: - type: string - #default: "registry.opensource.zalan.do/acid/pgbouncer" - connection_pooler_max_db_connections: - type: integer - #default: 60 - connection_pooler_mode: - type: string - enum: - - "session" - - "transaction" - #default: "transaction" - connection_pooler_number_of_instances: - type: integer - minimum: 2 - #default: 2 - connection_pooler_default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - #default: "1" - connection_pooler_default_cpu_request: - type: string - pattern: 
'^(\d+m|\d+(\.\d{1,3})?)$' - #default: "500m" - connection_pooler_default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - #default: "100Mi" - connection_pooler_default_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - #default: "100Mi" - status: - type: object - additionalProperties: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: Spilo image to be used for Pods + jsonPath: .configuration.docker_image + - name: Cluster-Label + type: string + description: Label for K8s resources created by operator + jsonPath: .configuration.kubernetes.cluster_name_label + - name: Service-Account + type: string + description: Name of service account to be used + jsonPath: .configuration.kubernetes.pod_service_account_name + - name: Min-Instances + type: integer + description: Minimum number of instances per Postgres cluster + jsonPath: .configuration.min_instances + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - configuration + properties: + kind: type: string + enum: + - OperatorConfiguration + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + configuration: + type: object + properties: + docker_image: + type: string + enable_crd_validation: + type: boolean + enable_lazy_spilo_upgrade: + type: boolean + enable_pgversion_env_var: + type: boolean + enable_shm_volume: + type: boolean + enable_spilo_wal_path_compat: + type: boolean + etcd_host: + type: string + kubernetes_use_configmaps: + type: boolean + max_instances: + type: integer + minimum: -1 # -1 = disabled + min_instances: + type: integer + minimum: -1 # -1 = disabled + resync_period: + type: string + repair_period: + type: string + set_memory_request_to_limit: + type: boolean + sidecar_docker_images: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + workers: + type: integer + minimum: 1 + users: + type: object + properties: + replication_username: + type: string + super_username: + type: string + kubernetes: + type: object + properties: + cluster_domain: + type: string + cluster_labels: + type: object + additionalProperties: + type: string + cluster_name_label: + type: string + custom_pod_annotations: + type: object + additionalProperties: + type: string + delete_annotation_date_key: + type: string + delete_annotation_name_key: + type: string + downscaler_annotations: + type: array + items: + type: string + enable_init_containers: + type: boolean + enable_pod_antiaffinity: + type: boolean + enable_pod_disruption_budget: + type: boolean + enable_sidecars: + type: boolean + infrastructure_roles_secret_name: + type: string + infrastructure_roles_secrets: + type: array + nullable: true + items: + type: object + required: + - secretname + - userkey + - passwordkey + properties: + secretname: + type: string + userkey: + type: string + passwordkey: + type: string + rolekey: + type: string + defaultuservalue: + type: string + defaultrolevalue: + type: string + details: + type: string + template: + type: boolean + inherited_annotations: + type: array + items: + type: string + inherited_labels: + type: array + items: + type: string + master_pod_move_timeout: + type: string + node_readiness_label: + type: object + additionalProperties: + type: string 
+ oauth_token_secret_name: + type: string + pdb_name_format: + type: string + pod_antiaffinity_topology_key: + type: string + pod_environment_configmap: + type: string + pod_environment_secret: + type: string + pod_management_policy: + type: string + enum: + - "ordered_ready" + - "parallel" + pod_priority_class_name: + type: string + pod_role_label: + type: string + pod_service_account_definition: + type: string + pod_service_account_name: + type: string + pod_service_account_role_binding_definition: + type: string + pod_terminate_grace_period: + type: string + secret_name_template: + type: string + spilo_runasuser: + type: integer + spilo_runasgroup: + type: integer + spilo_fsgroup: + type: integer + spilo_privileged: + type: boolean + storage_resize_mode: + type: string + enum: + - "ebs" + - "pvc" + - "off" + toleration: + type: object + additionalProperties: + type: string + watched_namespace: + type: string + postgres_pod_resources: + type: object + properties: + default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + timeouts: + type: object + properties: + pod_label_wait_timeout: + type: string + pod_deletion_wait_timeout: + type: string + ready_wait_interval: + type: string + ready_wait_timeout: + type: string + resource_check_interval: + type: string + resource_check_timeout: + type: string + load_balancer: + type: object + properties: + custom_service_annotations: + type: object + additionalProperties: + type: string + db_hosted_zone: + type: string + enable_master_load_balancer: + type: boolean + enable_replica_load_balancer: + type: boolean + external_traffic_policy: + type: string + enum: + - "Cluster" + - "Local" + master_dns_name_format: + type: string + replica_dns_name_format: + type: string + aws_or_gcp: + type: object + properties: + additional_secret_mount: + type: string + additional_secret_mount_path: + type: string + aws_region: + type: string + enable_ebs_gp3_migration: + type: boolean + enable_ebs_gp3_migration_max_size: + type: integer + gcp_credentials: + type: string + kube_iam_role: + type: string + log_s3_bucket: + type: string + wal_gs_bucket: + type: string + wal_s3_bucket: + type: string + logical_backup: + type: object + properties: + logical_backup_docker_image: + type: string + logical_backup_google_application_credentials: + type: string + logical_backup_provider: + type: string + logical_backup_s3_access_key_id: + type: string + logical_backup_s3_bucket: + type: string + logical_backup_s3_endpoint: + type: string + logical_backup_s3_region: + type: string + logical_backup_s3_secret_access_key: + type: string + logical_backup_s3_sse: + type: string + logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + debug: + type: object + properties: + debug_logging: + type: boolean + enable_database_access: + type: boolean + teams_api: + type: object + properties: + enable_admin_role_for_users: + type: boolean + enable_postgres_team_crd: + type: boolean + enable_postgres_team_crd_superusers: + type: boolean + enable_team_superuser: + type: boolean + enable_teams_api: + type: boolean 
+ pam_configuration: + type: string + pam_role_name: + type: string + postgres_superuser_teams: + type: array + items: + type: string + protected_role_names: + type: array + items: + type: string + team_admin_role: + type: string + team_api_role_configuration: + type: object + additionalProperties: + type: string + teams_api_url: + type: string + logging_rest_api: + type: object + properties: + api_port: + type: integer + cluster_history_entries: + type: integer + ring_log_lines: + type: integer + scalyr: # deprecated + type: object + properties: + scalyr_api_key: + type: string + scalyr_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + scalyr_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + scalyr_image: + type: string + scalyr_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + scalyr_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + scalyr_server_url: + type: string + connection_pooler: + type: object + properties: + connection_pooler_schema: + type: string + #default: "pooler" + connection_pooler_user: + type: string + #default: "pooler" + connection_pooler_image: + type: string + #default: "registry.opensource.zalan.do/acid/pgbouncer" + connection_pooler_max_db_connections: + type: integer + #default: 60 + connection_pooler_mode: + type: string + enum: + - "session" + - "transaction" + #default: "transaction" + connection_pooler_number_of_instances: + type: integer + minimum: 2 + #default: 2 + connection_pooler_default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + #default: "1" + connection_pooler_default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + #default: "500m" + connection_pooler_default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + #default: "100Mi" + connection_pooler_default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + #default: "100Mi" + status: + type: object + additionalProperties: + type: string diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index e7a604a2d..da4ca7fc6 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -2,8 +2,12 @@ apiVersion: apps/v1 kind: Deployment metadata: name: postgres-operator + labels: + application: postgres-operator spec: replicas: 1 + strategy: + type: "Recreate" selector: matchLabels: name: postgres-operator @@ -15,7 +19,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.5.0 + image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.0 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/postgres-pod-priority-class.yaml b/manifests/postgres-pod-priority-class.yaml new file mode 100644 index 000000000..f1b565f21 --- /dev/null +++ b/manifests/postgres-pod-priority-class.yaml @@ -0,0 +1,11 @@ +apiVersion: scheduling.k8s.io/v1 +description: 'This priority class must be used only for databases controlled by the + Postgres operator' +kind: PriorityClass +metadata: + labels: + application: postgres-operator + name: postgres-pod-priority +preemptionPolicy: PreemptLowerPriority +globalDefault: false +value: 1000000 diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 049e917f6..efc6052da 100644 --- 
a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -3,10 +3,12 @@ kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3 + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # enable_crd_validation: true # enable_lazy_spilo_upgrade: false + enable_pgversion_env_var: true # enable_shm_volume: true + enable_spilo_wal_path_compat: false etcd_host: "" # kubernetes_use_configmaps: false max_instances: -1 @@ -19,7 +21,7 @@ configuration: # name: global-sidecar-1 # ports: # - containerPort: 80 - workers: 4 + workers: 8 users: replication_username: standby super_username: postgres @@ -31,6 +33,8 @@ configuration: # custom_pod_annotations: # keya: valuea # keyb: valueb + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername # downscaler_annotations: # - deployment-time # - downscaler/* @@ -39,6 +43,16 @@ configuration: enable_pod_disruption_budget: true enable_sidecars: true # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: + # - secretname: "monitoring-roles" + # userkey: "user" + # passwordkey: "password" + # rolekey: "inrole" + # - secretname: "other-infrastructure-role" + # userkey: "other-user-key" + # passwordkey: "other-password-key" + # inherited_annotations: + # - owned-by # inherited_labels: # - application # - environment @@ -49,16 +63,20 @@ configuration: pdb_name_format: "postgres-{cluster}-pdb" pod_antiaffinity_topology_key: "kubernetes.io/hostname" # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" pod_management_policy: "ordered_ready" - # pod_priority_class_name: "" + # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role # pod_service_account_definition: "" pod_service_account_name: postgres-pod # pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 # spilo_fsgroup: 103 spilo_privileged: false + storage_resize_mode: pvc # toleration: {} # watched_namespace: "" postgres_pod_resources: @@ -76,23 +94,28 @@ configuration: resource_check_interval: 3s resource_check_timeout: 10m load_balancer: - # db_hosted_zone: "" - enable_master_load_balancer: false - enable_replica_load_balancer: false # custom_service_annotations: # keyx: valuex # keyy: valuey + # db_hosted_zone: "" + enable_master_load_balancer: false + enable_replica_load_balancer: false + external_traffic_policy: "Cluster" master_dns_name_format: "{cluster}.{team}.{hostedzone}" replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" aws_or_gcp: # additional_secret_mount: "some-secret-name" # additional_secret_mount_path: "/some/dir" aws_region: eu-central-1 + enable_ebs_gp3_migration: false + # enable_ebs_gp3_migration_max_size: 1000 + # gcp_credentials: "" # kube_iam_role: "" # log_s3_bucket: "" + # wal_gs_bucket: "" # wal_s3_bucket: "" logical_backup: - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_endpoint: "" @@ -105,6 +128,8 @@ configuration: enable_database_access: true 
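+ # the teams_api section below also includes the toggles for the new PostgresTeam CRD (see manifests/custom-team-membership.yaml for a sample resource)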
teams_api: # enable_admin_role_for_users: true + # enable_postgres_team_crd: false + # enable_postgres_team_crd_superusers: false enable_team_superuser: false enable_teams_api: false # pam_configuration: "" @@ -126,7 +151,7 @@ configuration: connection_pooler_default_cpu_request: "500m" connection_pooler_default_memory_limit: 100Mi connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-12" # connection_pooler_max_db_connections: 60 connection_pooler_mode: "transaction" connection_pooler_number_of_instances: 2 diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index e62204c40..d5170e9d4 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: postgresqls.acid.zalan.do @@ -11,437 +11,557 @@ spec: singular: postgresql shortNames: - pg + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - spec - properties: - kind: - type: string - enum: - - postgresql - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - spec: - type: object - required: - - numberOfInstances - - teamId - - postgresql - properties: - additionalVolumes: - type: array - items: - type: object - required: - - name - - mountPath - - volumeSource - properties: - name: - type: string - mountPath: - type: string - targetContainers: - type: array - nullable: true - items: - type: string - volumeSource: - type: object - subPath: - type: string - allowedSourceRanges: - type: array - nullable: true - items: - type: string - pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' - clone: - type: object - required: - - cluster - properties: - cluster: - type: string - s3_endpoint: - type: string - s3_access_key_id: - type: string - s3_secret_access_key: - type: string - s3_force_path_style: - type: boolean - s3_wal_path: - type: string - timestamp: - type: string - pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' - # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC - # Example: 1996-12-19T16:39:57-08:00 - # Note: this field requires a timezone - uid: - format: uuid - type: string - connectionPooler: - type: object - properties: - dockerImage: - type: string - maxDBConnections: - type: integer - mode: - type: string - enum: - - "session" - - "transaction" - numberOfInstances: - type: integer - minimum: 2 - resources: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Team + type: string + description: Team responsible for Postgres cluster + jsonPath: .spec.teamId + - name: Version + type: string + description: PostgreSQL version + jsonPath: .spec.postgresql.version + - name: Pods + type: integer + description: Number of Pods per Postgres cluster + jsonPath: .spec.numberOfInstances + - name: Volume + type: string + description: Size of the bound volume + jsonPath: .spec.volume.size + - name: CPU-Request + 
type: string + description: Requested CPU for Postgres containers + jsonPath: .spec.resources.requests.cpu + - name: Memory-Request + type: string + description: Requested memory for Postgres containers + jsonPath: .spec.resources.requests.memory + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + - name: Status + type: string + description: Current sync status of postgresql resource + jsonPath: .status.PostgresClusterStatus + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - postgresql + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + required: + - numberOfInstances + - teamId + - postgresql + - volume + properties: + additionalVolumes: + type: array + items: type: object required: - - requests - - limits + - name + - mountPath + - volumeSource properties: - limits: + name: + type: string + mountPath: + type: string + targetContainers: + type: array + nullable: true + items: + type: string + volumeSource: type: object - required: - - cpu - - memory - properties: - cpu: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - memory: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - requests: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - memory: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - schema: - type: string - user: - type: string - databases: - type: object - additionalProperties: - type: string - # Note: usernames specified here as database owners must be declared in the users key of the spec key. - dockerImage: - type: string - enableConnectionPooler: - type: boolean - enableLogicalBackup: - type: boolean - enableMasterLoadBalancer: - type: boolean - enableReplicaLoadBalancer: - type: boolean - enableShmVolume: - type: boolean - init_containers: # deprecated - type: array - nullable: true - items: - type: object - additionalProperties: true - initContainers: - type: array - nullable: true - items: - type: object - additionalProperties: true - logicalBackupSchedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - maintenanceWindows: - type: array - items: - type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' - numberOfInstances: - type: integer - minimum: 0 - patroni: - type: object - properties: - initdb: - type: object - additionalProperties: - type: string - pg_hba: - type: array - items: - type: string - slots: - type: object - additionalProperties: - type: object - additionalProperties: + x-kubernetes-preserve-unknown-fields: true + subPath: type: string - ttl: - type: integer - loop_wait: - type: integer - retry_timeout: - type: integer - maximum_lag_on_failover: - type: integer - synchronous_mode: - type: boolean - synchronous_mode_strict: - type: boolean - podAnnotations: - type: object - additionalProperties: - type: string - pod_priority_class_name: # deprecated - type: string - podPriorityClassName: - type: string - postgresql: - type: object - required: - - version - properties: - version: - type: string - enum: - - "9.3" - - "9.4" - - "9.5" - - "9.6" - - "10" - - "11" - - "12" - parameters: - type: object - additionalProperties: - type: string - preparedDatabases: - type: object - additionalProperties: - type: object - properties: - 
defaultUsers: - type: boolean - extensions: - type: object - additionalProperties: - type: string - schemas: - type: object - additionalProperties: - type: object - properties: - defaultUsers: - type: boolean - defaultRoles: - type: boolean - replicaLoadBalancer: # deprecated - type: boolean - resources: - type: object - required: - - requests - - limits - properties: - limits: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - requests: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. 
- serviceAnnotations: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: - type: object - additionalProperties: true - spiloFSGroup: - type: integer - standby: - type: object - required: - - s3_wal_path - properties: - s3_wal_path: - type: string - teamId: - type: string - tls: - type: object - required: - - secretName - properties: - secretName: - type: string - certificateFile: - type: string - privateKeyFile: - type: string - caFile: - type: string - caSecretName: - type: string - tolerations: - type: array - items: - type: object - required: - - key - - operator - - effect - properties: - key: - type: string - operator: - type: string - enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - useLoadBalancer: # deprecated - type: boolean - users: - type: object - additionalProperties: + allowedSourceRanges: type: array nullable: true - description: "Role flags specified here must not contradict each other" items: type: string - enum: - - bypassrls - - BYPASSRLS - - nobypassrls - - NOBYPASSRLS - - createdb - - CREATEDB - - nocreatedb - - NOCREATEDB - - createrole - - CREATEROLE - - nocreaterole - - NOCREATEROLE - - inherit - - INHERIT - - noinherit - - NOINHERIT - - login - - LOGIN - - nologin - - NOLOGIN - - replication - - REPLICATION - - noreplication - - NOREPLICATION - - superuser - - SUPERUSER - - nosuperuser - - NOSUPERUSER - volume: - type: object - required: - - size - properties: - size: + pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' + clone: + type: object + required: + - cluster + properties: + cluster: + type: string + s3_endpoint: + type: string + s3_access_key_id: + type: string + s3_secret_access_key: + type: string + s3_force_path_style: + type: boolean + s3_wal_path: + type: string + timestamp: + type: string + pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' + # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC + # Example: 1996-12-19T16:39:57-08:00 + # Note: this field requires a timezone + uid: + format: uuid + type: string + connectionPooler: + type: object + properties: + dockerImage: + type: string + maxDBConnections: + type: integer + mode: + type: string + enum: + - "session" + - "transaction" + numberOfInstances: + type: integer + minimum: 2 + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schema: + type: string + user: + type: string + databases: + type: object + additionalProperties: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero. - storageClass: + # Note: usernames specified here as database owners must be declared in the users key of the spec key. 
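+ # for example, "foo: zalando" creates a database "foo" owned by the user "zalando"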
+ dockerImage: + type: string + enableConnectionPooler: + type: boolean + enableReplicaConnectionPooler: + type: boolean + enableLogicalBackup: + type: boolean + enableMasterLoadBalancer: + type: boolean + enableReplicaLoadBalancer: + type: boolean + enableShmVolume: + type: boolean + init_containers: # deprecated + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + initContainers: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + logicalBackupSchedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + maintenanceWindows: + type: array + items: type: string - subPath: + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + numberOfInstances: + type: integer + minimum: 0 + patroni: + type: object + properties: + initdb: + type: object + additionalProperties: + type: string + loop_wait: + type: integer + maximum_lag_on_failover: + type: integer + pg_hba: + type: array + items: + type: string + retry_timeout: + type: integer + slots: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + synchronous_mode: + type: boolean + synchronous_mode_strict: + type: boolean + ttl: + type: integer + podAnnotations: + type: object + additionalProperties: type: string - status: - type: object - additionalProperties: - type: string + pod_priority_class_name: # deprecated + type: string + podPriorityClassName: + type: string + postgresql: + type: object + required: + - version + properties: + version: + type: string + enum: + - "9.3" + - "9.4" + - "9.5" + - "9.6" + - "10" + - "11" + - "12" + - "13" + parameters: + type: object + additionalProperties: + type: string + preparedDatabases: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + extensions: + type: object + additionalProperties: + type: string + schemas: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + defaultRoles: + type: boolean + replicaLoadBalancer: # deprecated + type: boolean + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + # Decimal natural followed by m, or decimal natural followed by + # dot followed by up to three decimal digits. + # + # This is because the Kubernetes CPU resource has millis as the + # maximum precision. The actual values are checked in code + # because the regular expression would be huge and horrible and + # not very helpful in validation error messages; this one checks + # only the format of the given number. + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + # Note: the value specified here must not be zero or be lower + # than the corresponding request. + memory: + type: string + # You can express memory as a plain integer or as a fixed-point + # integer using one of these suffixes: E, P, T, G, M, k. 
You can + # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero or be higher + # than the corresponding limit. + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schedulerName: + type: string + serviceAnnotations: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + spiloRunAsUser: + type: integer + spiloRunAsGroup: + type: integer + spiloFSGroup: + type: integer + standby: + type: object + required: + - s3_wal_path + properties: + s3_wal_path: + type: string + teamId: + type: string + tls: + type: object + required: + - secretName + properties: + secretName: + type: string + certificateFile: + type: string + privateKeyFile: + type: string + caFile: + type: string + caSecretName: + type: string + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + required: + - weight + - preference + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + format: int32 + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + tolerations: + type: array + items: + type: object + required: + - key + - operator + - effect + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + useLoadBalancer: # deprecated + type: boolean + users: + type: object + additionalProperties: + type: array + nullable: true + description: "Role flags specified here must not contradict each other" + items: + type: string + enum: + - bypassrls + - BYPASSRLS + - nobypassrls + - NOBYPASSRLS + - createdb + - CREATEDB + - nocreatedb + - NOCREATEDB + - createrole + - CREATEROLE + - nocreaterole + - NOCREATEROLE + - inherit + - INHERIT + - noinherit + - NOINHERIT + - login + - LOGIN + - nologin + - NOLOGIN + - replication + - REPLICATION + - noreplication + - NOREPLICATION + - superuser + - SUPERUSER + - nosuperuser + - NOSUPERUSER + volume: + type: object + required: + - size + properties: + size: + type: string + pattern: 
'^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+                  # Note: the value specified here must not be zero.
+                storageClass:
+                  type: string
+                subPath:
+                  type: string
+          status:
+            type: object
+            additionalProperties:
+              type: string
diff --git a/manifests/postgresteam.crd.yaml b/manifests/postgresteam.crd.yaml
new file mode 100644
index 000000000..2588e53b1
--- /dev/null
+++ b/manifests/postgresteam.crd.yaml
@@ -0,0 +1,68 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: postgresteams.acid.zalan.do
+spec:
+  group: acid.zalan.do
+  names:
+    kind: PostgresTeam
+    listKind: PostgresTeamList
+    plural: postgresteams
+    singular: postgresteam
+    shortNames:
+    - pgteam
+    categories:
+    - all
+  scope: Namespaced
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    subresources:
+      status: {}
+    schema:
+      openAPIV3Schema:
+        type: object
+        required:
+          - kind
+          - apiVersion
+          - spec
+        properties:
+          kind:
+            type: string
+            enum:
+              - PostgresTeam
+          apiVersion:
+            type: string
+            enum:
+              - acid.zalan.do/v1
+          spec:
+            type: object
+            properties:
+              additionalSuperuserTeams:
+                type: object
+                description: "Map for teamId and associated additional superuser teams"
+                additionalProperties:
+                  type: array
+                  nullable: true
+                  description: "List of teams to become Postgres superusers"
+                  items:
+                    type: string
+              additionalTeams:
+                type: object
+                description: "Map for teamId and associated additional teams"
+                additionalProperties:
+                  type: array
+                  nullable: true
+                  description: "List of teams whose members will also be added to the Postgres cluster"
+                  items:
+                    type: string
+              additionalMembers:
+                type: object
+                description: "Map for teamId and associated additional users"
+                additionalProperties:
+                  type: array
+                  nullable: true
+                  description: "List of users who will also be added to the Postgres cluster"
+                  items:
+                    type: string
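As a usage sketch, a PostgresTeam resource that this new CRD would accept could look as follows; the team and user names are invented for illustration:

apiVersion: acid.zalan.do/v1
kind: PostgresTeam
metadata:
  name: custom-team-membership
spec:
  additionalSuperuserTeams:
    acid:                  # teamId -> teams whose members become Postgres superusers
      - "postgres_superusers"
  additionalTeams:
    acid:                  # teamId -> additional teams added to the cluster
      - "fraud"
  additionalMembers:
    fraud:                 # teamId -> individual users added to the cluster
      - "fraudyfraud"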
diff --git a/manifests/standby-manifest.yaml b/manifests/standby-manifest.yaml
index 4c8d09650..593f409ec 100644
--- a/manifests/standby-manifest.yaml
+++ b/manifests/standby-manifest.yaml
@@ -9,7 +9,7 @@ spec:
     size: 1Gi
   numberOfInstances: 1
   postgresql:
-    version: "12"
+    version: "13"
   # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
   standby:
     s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"
diff --git a/mkdocs.yml b/mkdocs.yml
index 34f55fac8..b8e8c3e04 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -13,4 +13,3 @@ nav:
     - Config parameters: 'reference/operator_parameters.md'
     - Manifest parameters: 'reference/cluster_manifest.md'
     - CLI options and environment: 'reference/command_line_and_environment.md'
-    - Google Summer of Code 2019: 'gsoc-2019/ideas.md'
diff --git a/mocks/mocks.go b/mocks/mocks.go
new file mode 100644
index 000000000..f726b26e5
--- /dev/null
+++ b/mocks/mocks.go
@@ -0,0 +1 @@
+package mocks
diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go
index ad1b79a45..3d4ff09bc 100644
--- a/pkg/apis/acid.zalan.do/v1/crds.go
+++ b/pkg/apis/acid.zalan.do/v1/crds.go
@@ -2,7 +2,8 @@ package v1

 import (
 	acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
-	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"github.com/zalando/postgres-operator/pkg/util"
+	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -20,7 +21,7 @@ const (
 )

 // PostgresCRDResourceColumns definition of AdditionalPrinterColumns for postgresql CRD
-var PostgresCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{
+var PostgresCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{
 	{
 		Name: "Team",
 		Type: "string",
@@ -71,7 +72,7 @@ var PostgresCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{
 }

 // OperatorConfigCRDResourceColumns definition of AdditionalPrinterColumns for OperatorConfiguration CRD
-var OperatorConfigCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{
+var OperatorConfigCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{
 	{
 		Name: "Image",
 		Type: "string",
@@ -109,14 +110,14 @@ var min2 = 2.0
 var minDisable = -1.0

 // PostgresCRDResourceValidation to check applied manifest parameters
-var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
-	OpenAPIV3Schema: &apiextv1beta1.JSONSchemaProps{
+var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
+	OpenAPIV3Schema: &apiextv1.JSONSchemaProps{
 		Type:     "object",
 		Required: []string{"kind", "apiVersion", "spec"},
-		Properties: map[string]apiextv1beta1.JSONSchemaProps{
+		Properties: map[string]apiextv1.JSONSchemaProps{
 			"kind": {
 				Type: "string",
-				Enum: []apiextv1beta1.JSON{
+				Enum: []apiextv1.JSON{
 					{
 						Raw: []byte(`"postgresql"`),
 					},
@@ -124,7 +125,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 			},
 			"apiVersion": {
 				Type: "string",
-				Enum: []apiextv1beta1.JSON{
+				Enum: []apiextv1.JSON{
 					{
 						Raw: []byte(`"acid.zalan.do/v1"`),
 					},
@@ -132,13 +133,45 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 			},
 			"spec": {
 				Type:     "object",
-				Required: []string{"numberOfInstances", "teamId", "postgresql"},
-				Properties: map[string]apiextv1beta1.JSONSchemaProps{
+				Required: []string{"numberOfInstances", "teamId", "postgresql", "volume"},
+				Properties: map[string]apiextv1.JSONSchemaProps{
+					"additionalVolumes": {
+						Type: "array",
+						Items: &apiextv1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1.JSONSchemaProps{
+								Type:     "object",
+								Required: []string{"name", "mountPath", "volumeSource"},
+								Properties: map[string]apiextv1.JSONSchemaProps{
+									"name": {
+										Type: "string",
+									},
+									"mountPath": {
+										Type: "string",
+									},
+									"targetContainers": {
+										Type: "array",
+										Items: &apiextv1.JSONSchemaPropsOrArray{
+
Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "volumeSource": { + Type: "object", + XPreserveUnknownFields: util.True(), + }, + "subPath": { + Type: "string", + }, + }, + }, + }, + }, "allowedSourceRanges": { Type: "array", Nullable: true, - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Pattern: "^(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\/(\\d|[1-2]\\d|3[0-2])$", }, @@ -147,7 +180,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "clone": { Type: "object", Required: []string{"cluster"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cluster": { Type: "string", }, @@ -169,7 +202,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "timestamp": { Type: "string", Description: "Date-time format that specifies a timezone as an offset relative to UTC e.g. 1996-12-19T16:39:57-08:00", - Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$", + Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$", }, "uid": { Type: "string", @@ -179,7 +212,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "connectionPooler": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "dockerImage": { Type: "string", }, @@ -188,7 +221,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "mode": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"session"`), }, @@ -204,11 +237,11 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "resources": { Type: "object", Required: []string{"requests", "limits"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "limits": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", @@ -224,7 +257,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "requests": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). 
Must be greater than 0", @@ -249,8 +282,8 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "databases": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Description: "User names specified here as database owners must be declared in the users key of the spec key", }, @@ -262,6 +295,9 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "enableConnectionPooler": { Type: "boolean", }, + "enableReplicaConnectionPooler": { + Type: "boolean", + }, "enableLogicalBackup": { Type: "boolean", }, @@ -277,23 +313,19 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "init_containers": { Type: "array", Description: "Deprecated", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, "initContainers": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -303,8 +335,8 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "maintenanceWindows": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Pattern: "^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$", }, @@ -316,60 +348,60 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "patroni": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "initdb": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "loop_wait": { + Type: "integer", + }, + "maximum_lag_on_failover": { + Type: "integer", + }, "pg_hba": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "retry_timeout": { + Type: "integer", + }, "slots": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, }, }, - "ttl": { - Type: "integer", - }, - "loop_wait": { - Type: "integer", - }, - "retry_timeout": { 
- Type: "integer", - }, - "maximum_lag_on_failover": { - Type: "integer", - }, "synchronous_mode": { Type: "boolean", }, "synchronous_mode_strict": { Type: "boolean", }, + "ttl": { + Type: "integer", + }, }, }, "podAnnotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -384,10 +416,10 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "postgresql": { Type: "object", Required: []string{"version"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "version": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"9.3"`), }, @@ -409,12 +441,15 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ { Raw: []byte(`"12"`), }, + { + Raw: []byte(`"13"`), + }, }, }, "parameters": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -423,27 +458,27 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "preparedDatabases": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "defaultUsers": { Type: "boolean", }, "extensions": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, "schemas": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "defaultUsers": { Type: "boolean", }, @@ -465,11 +500,11 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "resources": { Type: "object", Required: []string{"requests", "limits"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "limits": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", @@ -485,7 +520,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "requests": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). 
Must be greater than 0",
@@ -500,32 +535,39 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 							},
 						},
 					},
+					"schedulerName": {
+						Type: "string",
+					},
 					"serviceAnnotations": {
 						Type: "object",
-						AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
-							Schema: &apiextv1beta1.JSONSchemaProps{
+						AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
+							Schema: &apiextv1.JSONSchemaProps{
 								Type: "string",
 							},
 						},
 					},
 					"sidecars": {
 						Type: "array",
-						Items: &apiextv1beta1.JSONSchemaPropsOrArray{
-							Schema: &apiextv1beta1.JSONSchemaProps{
-								Type: "object",
-								AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
-									Allows: true,
-								},
+						Items: &apiextv1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1.JSONSchemaProps{
+								Type:                   "object",
+								XPreserveUnknownFields: util.True(),
 							},
 						},
 					},
+					"spiloRunAsUser": {
+						Type: "integer",
+					},
+					"spiloRunAsGroup": {
+						Type: "integer",
+					},
 					"spiloFSGroup": {
 						Type: "integer",
 					},
 					"standby": {
 						Type:     "object",
 						Required: []string{"s3_wal_path"},
-						Properties: map[string]apiextv1beta1.JSONSchemaProps{
+						Properties: map[string]apiextv1.JSONSchemaProps{
 							"s3_wal_path": {
 								Type: "string",
 							},
@@ -537,7 +579,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 					"tls": {
 						Type:     "object",
 						Required: []string{"secretName"},
-						Properties: map[string]apiextv1beta1.JSONSchemaProps{
+						Properties: map[string]apiextv1.JSONSchemaProps{
 							"secretName": {
 								Type: "string",
 							},
@@ -555,19 +597,104 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 							},
 						},
 					},
+					"nodeAffinity": {
+						Type: "object",
+						Properties: map[string]apiextv1.JSONSchemaProps{
+							"preferredDuringSchedulingIgnoredDuringExecution": {
+								Type: "array",
+								Items: &apiextv1.JSONSchemaPropsOrArray{
+									Schema: &apiextv1.JSONSchemaProps{
+										Type:     "object",
+										Required: []string{"preference", "weight"},
+										Properties: map[string]apiextv1.JSONSchemaProps{
+											"preference": {
+												Type: "object",
+												Properties: map[string]apiextv1.JSONSchemaProps{
+													"matchExpressions": {
+														Type: "array",
+														Items: &apiextv1.JSONSchemaPropsOrArray{
+															Schema: &apiextv1.JSONSchemaProps{
+																Type: "object",
+																AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
+																	Allows: true,
+																},
+															},
+														},
+													},
+													"matchFields": {
+														Type: "array",
+														Items: &apiextv1.JSONSchemaPropsOrArray{
+															Schema: &apiextv1.JSONSchemaProps{
+																Type: "object",
+																AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
+																	Allows: true,
+																},
+															},
+														},
+													},
+												},
+											},
+											"weight": {
+												Type:   "integer",
+												Format: "int32",
+											},
+										},
+									},
+								},
+							},
+							"requiredDuringSchedulingIgnoredDuringExecution": {
+								Type:     "object",
+								Required: []string{"nodeSelectorTerms"},
+								Properties: map[string]apiextv1.JSONSchemaProps{
+									"nodeSelectorTerms": {
+										Type: "array",
+										Items: &apiextv1.JSONSchemaPropsOrArray{
+											Schema: &apiextv1.JSONSchemaProps{
+												Type: "object",
+												Properties: map[string]apiextv1.JSONSchemaProps{
+													"matchExpressions": {
+														Type: "array",
+														Items: &apiextv1.JSONSchemaPropsOrArray{
+															Schema: &apiextv1.JSONSchemaProps{
+																Type: "object",
+																AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
+																	Allows: true,
+																},
+															},
+														},
+													},
+													"matchFields": {
+														Type: "array",
+														Items: &apiextv1.JSONSchemaPropsOrArray{
+															Schema: &apiextv1.JSONSchemaProps{
+																Type: "object",
+																AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
+																	Allows: true,
+																},
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
 					"tolerations": {
 						Type: "array",
-						Items: &apiextv1beta1.JSONSchemaPropsOrArray{
-							Schema: &apiextv1beta1.JSONSchemaProps{
+						Items:
&apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", Required: []string{"key", "operator", "effect"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "key": { Type: "string", }, "operator": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"Equal"`), }, @@ -581,7 +708,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "effect": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"NoExecute"`), }, @@ -606,15 +733,15 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "users": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "array", Description: "Role flags specified here must not contradict each other", Nullable: true, - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"bypassrls"`), }, @@ -708,7 +835,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "volume": { Type: "object", Required: []string{"size"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "size": { Type: "string", Description: "Value must not be zero", @@ -722,43 +849,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, - "additionalVolumes": { - Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - Required: []string{"name", "mountPath", "volumeSource"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ - "name": { - Type: "string", - }, - "mountPath": { - Type: "string", - }, - "targetContainers": { - Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "string", - }, - }, - }, - "volumeSource": { - Type: "object", - }, - "subPath": { - Type: "string", - }, - }, - }, - }, - }, }, }, "status": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -768,14 +864,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ } // OperatorConfigCRDResourceValidation to check applied manifest parameters -var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextv1beta1.JSONSchemaProps{ +var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextv1.JSONSchemaProps{ Type: "object", Required: []string{"kind", "apiVersion", "configuration"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "kind": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"OperatorConfiguration"`), }, @@ -783,7 +879,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "apiVersion": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: 
[]byte(`"acid.zalan.do/v1"`), }, @@ -791,7 +887,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "configuration": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "docker_image": { Type: "string", }, @@ -804,6 +900,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "enable_shm_volume": { Type: "boolean", }, + "enable_spilo_wal_path_compat": { + Type: "boolean", + }, "etcd_host": { Type: "string", }, @@ -831,20 +930,18 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "sidecar_docker_images": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, "sidecars": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -854,7 +951,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "users": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "replication_username": { Type: "string", }, @@ -865,14 +962,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "kubernetes": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cluster_domain": { Type: "string", }, "cluster_labels": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -882,16 +979,22 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "custom_pod_annotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "delete_annotation_date_key": { + Type: "string", + }, + "delete_annotation_name_key": { + Type: "string", + }, "downscaler_annotations": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -911,10 +1014,53 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "infrastructure_roles_secret_name": { Type: "string", }, + "infrastructure_roles_secrets": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Required: []string{"secretname", "userkey", "passwordkey"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "secretname": { + Type: "string", + }, + "userkey": { + Type: "string", + }, + "passwordkey": { + Type: "string", + }, + "rolekey": { + Type: "string", + }, + "defaultuservalue": { + Type: "string", + }, + "defaultrolevalue": { + Type: "string", + }, + "details": { + Type: "string", + }, + 
"template": { + Type: "boolean", + }, + }, + }, + }, + }, + "inherited_annotations": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "inherited_labels": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -924,8 +1070,8 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "node_readiness_label": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -942,9 +1088,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "pod_environment_configmap": { Type: "string", }, + "pod_environment_secret": { + Type: "string", + }, "pod_management_policy": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"ordered_ready"`), }, @@ -974,16 +1123,36 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "secret_name_template": { Type: "string", }, + "spilo_runasuser": { + Type: "integer", + }, + "spilo_runasgroup": { + Type: "integer", + }, "spilo_fsgroup": { Type: "integer", }, "spilo_privileged": { Type: "boolean", }, + "storage_resize_mode": { + Type: "string", + Enum: []apiextv1.JSON{ + { + Raw: []byte(`"ebs"`), + }, + { + Raw: []byte(`"pvc"`), + }, + { + Raw: []byte(`"off"`), + }, + }, + }, "toleration": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -995,7 +1164,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "postgres_pod_resources": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "default_cpu_limit": { Type: "string", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", @@ -1024,7 +1193,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "timeouts": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "pod_label_wait_timeout": { Type: "string", }, @@ -1047,11 +1216,11 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "load_balancer": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "custom_service_annotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -1065,6 +1234,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "enable_replica_load_balancer": { Type: "boolean", }, + "external_traffic_policy": { + Type: "string", + Enum: []apiextv1.JSON{ + { + Raw: []byte(`"Cluster"`), + }, + { + Raw: []byte(`"Local"`), + }, + }, + }, "master_dns_name_format": { Type: "string", }, @@ -1075,7 +1255,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "aws_or_gcp": { Type: "object", - 
Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "additional_secret_mount": { Type: "string", }, @@ -1085,6 +1265,15 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "aws_region": { Type: "string", }, + "enable_ebs_gp3_migration": { + Type: "boolean", + }, + "enable_ebs_gp3_migration_max_size": { + Type: "integer", + }, + "gcp_credentials": { + Type: "string", + }, "kube_iam_role": { Type: "string", }, @@ -1098,10 +1287,16 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "logical_backup": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "logical_backup_docker_image": { Type: "string", }, + "logical_backup_google_application_credentials": { + Type: "string", + }, + "logical_backup_provider": { + Type: "string", + }, "logical_backup_s3_access_key_id": { Type: "string", }, @@ -1128,7 +1323,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "debug": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "debug_logging": { Type: "boolean", }, @@ -1139,10 +1334,16 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "teams_api": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "enable_admin_role_for_users": { Type: "boolean", }, + "enable_postgres_team_crd": { + Type: "boolean", + }, + "enable_postgres_team_crd_superusers": { + Type: "boolean", + }, "enable_team_superuser": { Type: "boolean", }, @@ -1157,16 +1358,16 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "postgres_superuser_teams": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, "protected_role_names": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -1176,8 +1377,8 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "team_api_role_configuration": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -1189,7 +1390,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "logging_rest_api": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "api_port": { Type: "integer", }, @@ -1203,7 +1404,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "scalyr": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "scalyr_api_key": { Type: "string", }, @@ -1233,7 +1434,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "connection_pooler": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "connection_pooler_default_cpu_limit": { Type: 
"string", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", @@ -1258,7 +1459,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "connection_pooler_mode": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"session"`), }, @@ -1283,8 +1484,8 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "status": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -1293,32 +1494,39 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, } -func buildCRD(name, kind, plural, short string, columns []apiextv1beta1.CustomResourceColumnDefinition, validation apiextv1beta1.CustomResourceValidation) *apiextv1beta1.CustomResourceDefinition { - return &apiextv1beta1.CustomResourceDefinition{ +func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourceColumnDefinition, validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition { + return &apiextv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: apiextv1beta1.CustomResourceDefinitionSpec{ - Group: SchemeGroupVersion.Group, - Version: SchemeGroupVersion.Version, - Names: apiextv1beta1.CustomResourceDefinitionNames{ + Spec: apiextv1.CustomResourceDefinitionSpec{ + Group: SchemeGroupVersion.Group, + Names: apiextv1.CustomResourceDefinitionNames{ Plural: plural, ShortNames: []string{short}, Kind: kind, + Categories: []string{"all"}, }, - Scope: apiextv1beta1.NamespaceScoped, - Subresources: &apiextv1beta1.CustomResourceSubresources{ - Status: &apiextv1beta1.CustomResourceSubresourceStatus{}, + Scope: apiextv1.NamespaceScoped, + Versions: []apiextv1.CustomResourceDefinitionVersion{ + { + Name: SchemeGroupVersion.Version, + Served: true, + Storage: true, + Subresources: &apiextv1.CustomResourceSubresources{ + Status: &apiextv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: columns, + Schema: &validation, + }, }, - AdditionalPrinterColumns: columns, - Validation: &validation, }, } } // PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource -func PostgresCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition { - postgresCRDvalidation := apiextv1beta1.CustomResourceValidation{} +func PostgresCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { + postgresCRDvalidation := apiextv1.CustomResourceValidation{} if enableValidation != nil && *enableValidation { postgresCRDvalidation = PostgresCRDResourceValidation @@ -1333,8 +1541,8 @@ func PostgresCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition } // ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource -func ConfigurationCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition { - opconfigCRDvalidation := apiextv1beta1.CustomResourceValidation{} +func ConfigurationCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { + opconfigCRDvalidation := apiextv1.CustomResourceValidation{} if enableValidation != nil && *enableValidation { opconfigCRDvalidation = OperatorConfigCRDResourceValidation diff --git a/pkg/apis/acid.zalan.do/v1/marshal.go b/pkg/apis/acid.zalan.do/v1/marshal.go index d180f784c..9521082fc 100644 --- a/pkg/apis/acid.zalan.do/v1/marshal.go +++ b/pkg/apis/acid.zalan.do/v1/marshal.go @@ 
-102,7 +102,7 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { } tmp.Error = err.Error() - tmp.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} + tmp.Status.PostgresClusterStatus = ClusterStatusInvalid *p = Postgresql(tmp) @@ -113,9 +113,10 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { tmp2.Error = err.Error() tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} - } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil { + } else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil { + tmp2.Error = err.Error() - tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} + tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid } else { tmp2.Spec.ClusterName = clusterName } diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index d3a9f6ec2..9e5d01040 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -45,30 +45,38 @@ type PostgresUsersConfiguration struct { type KubernetesMetaConfiguration struct { PodServiceAccountName string `json:"pod_service_account_name,omitempty"` // TODO: change it to the proper json - PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` - PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` - PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` - SpiloPrivileged bool `json:"spilo_privileged,omitempty"` - SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` - WatchedNamespace string `json:"watched_namespace,omitempty"` - PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` - EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` - EnableInitContainers *bool `json:"enable_init_containers,omitempty"` - EnableSidecars *bool `json:"enable_sidecars,omitempty"` - SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` - ClusterDomain string `json:"cluster_domain,omitempty"` - OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` - InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` - PodRoleLabel string `json:"pod_role_label,omitempty"` - ClusterLabels map[string]string `json:"cluster_labels,omitempty"` - InheritedLabels []string `json:"inherited_labels,omitempty"` - DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"` - ClusterNameLabel string `json:"cluster_name_label,omitempty"` - NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` - CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"` + PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` + PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` + PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` + SpiloPrivileged bool `json:"spilo_privileged,omitempty"` + SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"` + SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` + SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` + WatchedNamespace string `json:"watched_namespace,omitempty"` + PDBNameFormat 
config.StringTemplate `json:"pdb_name_format,omitempty"` + EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` + StorageResizeMode string `json:"storage_resize_mode,omitempty"` + EnableInitContainers *bool `json:"enable_init_containers,omitempty"` + EnableSidecars *bool `json:"enable_sidecars,omitempty"` + SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` + ClusterDomain string `json:"cluster_domain,omitempty"` + OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` + InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` + InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` + PodRoleLabel string `json:"pod_role_label,omitempty"` + ClusterLabels map[string]string `json:"cluster_labels,omitempty"` + InheritedLabels []string `json:"inherited_labels,omitempty"` + InheritedAnnotations []string `json:"inherited_annotations,omitempty"` + DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"` + ClusterNameLabel string `json:"cluster_name_label,omitempty"` + DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"` + DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"` + NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` + CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"` // TODO: use a proper toleration structure? PodToleration map[string]string `json:"toleration,omitempty"` PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"` + PodEnvironmentSecret string `json:"pod_environment_secret,omitempty"` PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` @@ -104,17 +112,22 @@ type LoadBalancerConfiguration struct { CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"` MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"` ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"` + ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"` } // AWSGCPConfiguration defines the configuration for AWS // TODO complete Google Cloud Platform (GCP) configuration type AWSGCPConfiguration struct { - WALES3Bucket string `json:"wal_s3_bucket,omitempty"` - AWSRegion string `json:"aws_region,omitempty"` - LogS3Bucket string `json:"log_s3_bucket,omitempty"` - KubeIAMRole string `json:"kube_iam_role,omitempty"` - AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` - AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` + WALES3Bucket string `json:"wal_s3_bucket,omitempty"` + AWSRegion string `json:"aws_region,omitempty"` + WALGSBucket string `json:"wal_gs_bucket,omitempty"` + GCPCredentials string `json:"gcp_credentials,omitempty"` + LogS3Bucket string `json:"log_s3_bucket,omitempty"` + KubeIAMRole string `json:"kube_iam_role,omitempty"` + AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` + AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` + EnableEBSGp3Migration bool `json:"enable_ebs_gp3_migration" default:"false"` + EnableEBSGp3MigrationMaxSize int64 
`json:"enable_ebs_gp3_migration_max_size" default:"1000"` } // OperatorDebugConfiguration defines options for the debug mode @@ -125,16 +138,18 @@ type OperatorDebugConfiguration struct { // TeamsAPIConfiguration defines the configuration of TeamsAPI type TeamsAPIConfiguration struct { - EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` - TeamsAPIUrl string `json:"teams_api_url,omitempty"` - TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` - EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` - EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` - TeamAdminRole string `json:"team_admin_role,omitempty"` - PamRoleName string `json:"pam_role_name,omitempty"` - PamConfiguration string `json:"pam_configuration,omitempty"` - ProtectedRoles []string `json:"protected_role_names,omitempty"` - PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` + TeamsAPIUrl string `json:"teams_api_url,omitempty"` + TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` + EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` + EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` + TeamAdminRole string `json:"team_admin_role,omitempty"` + PamRoleName string `json:"pam_role_name,omitempty"` + PamConfiguration string `json:"pam_configuration,omitempty"` + ProtectedRoles []string `json:"protected_role_names,omitempty"` + PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnablePostgresTeamCRD bool `json:"enable_postgres_team_crd,omitempty"` + EnablePostgresTeamCRDSuperusers bool `json:"enable_postgres_team_crd_superusers,omitempty"` } // LoggingRESTAPIConfiguration defines Logging API conf @@ -155,7 +170,7 @@ type ScalyrConfiguration struct { ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"` } -// Defines default configuration for connection pooler +// ConnectionPoolerConfiguration defines default configuration for connection pooler type ConnectionPoolerConfiguration struct { NumberOfInstances *int32 `json:"connection_pooler_number_of_instances,omitempty"` Schema string `json:"connection_pooler_schema,omitempty"` @@ -171,32 +186,35 @@ type ConnectionPoolerConfiguration struct { // OperatorLogicalBackupConfiguration defines configuration for logical backup type OperatorLogicalBackupConfiguration struct { - Schedule string `json:"logical_backup_schedule,omitempty"` - DockerImage string `json:"logical_backup_docker_image,omitempty"` - S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` - S3Region string `json:"logical_backup_s3_region,omitempty"` - S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` - S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` - S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` - S3SSE string `json:"logical_backup_s3_sse,omitempty"` + Schedule string `json:"logical_backup_schedule,omitempty"` + DockerImage string `json:"logical_backup_docker_image,omitempty"` + BackupProvider string `json:"logical_backup_provider,omitempty"` + S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3Region string `json:"logical_backup_s3_region,omitempty"` + S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` + S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` + S3SecretAccessKey string 
`json:"logical_backup_s3_secret_access_key,omitempty"` + S3SSE string `json:"logical_backup_s3_sse,omitempty"` + GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"` } // OperatorConfigurationData defines the operation config type OperatorConfigurationData struct { - EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"` - EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"` - EtcdHost string `json:"etcd_host,omitempty"` - KubernetesUseConfigMaps bool `json:"kubernetes_use_configmaps,omitempty"` - DockerImage string `json:"docker_image,omitempty"` - Workers uint32 `json:"workers,omitempty"` - MinInstances int32 `json:"min_instances,omitempty"` - MaxInstances int32 `json:"max_instances,omitempty"` - ResyncPeriod Duration `json:"resync_period,omitempty"` - RepairPeriod Duration `json:"repair_period,omitempty"` - SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"` - ShmVolume *bool `json:"enable_shm_volume,omitempty"` - // deprecated in favour of SidecarContainers - SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` + EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"` + EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"` + EnablePgVersionEnvVar bool `json:"enable_pgversion_env_var,omitempty"` + EnableSpiloWalPathCompat bool `json:"enable_spilo_wal_path_compat,omitempty"` + EtcdHost string `json:"etcd_host,omitempty"` + KubernetesUseConfigMaps bool `json:"kubernetes_use_configmaps,omitempty"` + DockerImage string `json:"docker_image,omitempty"` + Workers uint32 `json:"workers,omitempty"` + MinInstances int32 `json:"min_instances,omitempty"` + MaxInstances int32 `json:"max_instances,omitempty"` + ResyncPeriod Duration `json:"resync_period,omitempty"` + RepairPeriod Duration `json:"repair_period,omitempty"` + SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"` + ShmVolume *bool `json:"enable_shm_volume,omitempty"` + SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers SidecarContainers []v1.Container `json:"sidecars,omitempty"` PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` diff --git a/pkg/apis/acid.zalan.do/v1/postgres_team_type.go b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go new file mode 100644 index 000000000..5697c193e --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go @@ -0,0 +1,33 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeam defines Custom Resource Definition Object for team management. +type PostgresTeam struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresTeamSpec `json:"spec"` +} + +// PostgresTeamSpec defines the specification for the PostgresTeam TPR. +type PostgresTeamSpec struct { + AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"` + AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"` + AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeamList defines a list of PostgresTeam definitions. 
+type PostgresTeamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []PostgresTeam `json:"items"` +} diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 5df82e947..bdae22a7c 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -29,13 +29,16 @@ type PostgresSpec struct { Patroni `json:"patroni,omitempty"` Resources `json:"resources,omitempty"` - EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` - ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` + EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` + EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` + ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` TeamID string `json:"teamId"` DockerImage string `json:"dockerImage,omitempty"` - SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"` + SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"` + SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"` + SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"` // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest // in that case the var evaluates to nil and the value is taken from the operator config @@ -51,12 +54,14 @@ type PostgresSpec struct { AllowedSourceRanges []string `json:"allowedSourceRanges"` NumberOfInstances int32 `json:"numberOfInstances"` - Users map[string]UserFlags `json:"users"` + Users map[string]UserFlags `json:"users,omitempty"` MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone CloneDescription `json:"clone"` + Clone *CloneDescription `json:"clone,omitempty"` ClusterName string `json:"-"` Databases map[string]string `json:"databases,omitempty"` PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"` + SchedulerName *string `json:"schedulerName,omitempty"` + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` Tolerations []v1.Toleration `json:"tolerations,omitempty"` Sidecars []Sidecar `json:"sidecars,omitempty"` InitContainers []v1.Container `json:"initContainers,omitempty"` @@ -64,10 +69,10 @@ type PostgresSpec struct { ShmVolume *bool `json:"enableShmVolume,omitempty"` EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` - StandbyCluster *StandbyDescription `json:"standby"` - PodAnnotations map[string]string `json:"podAnnotations"` - ServiceAnnotations map[string]string `json:"serviceAnnotations"` - TLS *TLSDescription `json:"tls"` + StandbyCluster *StandbyDescription `json:"standby,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + TLS *TLSDescription `json:"tls,omitempty"` AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"` // deprecated json tags @@ -109,14 +114,17 @@ type MaintenanceWindow struct { // Volume describes a single volume in the manifest. 
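An editorial aside on the Volume changes below: the new Iops and Throughput fields are pointers so that "unset" (nil) can be distinguished from an explicit zero, which matters for the enable_ebs_gp3_migration path introduced earlier in this diff. A minimal sketch of populating them through the Go API follows; the int64Ptr helper and the units in the comments are assumptions for illustration, not part of the package:

package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

// int64Ptr is a helper invented for this example.
func int64Ptr(v int64) *int64 { return &v }

func main() {
	// A Volume with gp3 parameters; nil pointers mean "keep the provider default".
	vol := acidv1.Volume{
		Size:       "100Gi",
		Iops:       int64Ptr(3000), // assumed unit: IOPS
		Throughput: int64Ptr(125),  // assumed unit: MB/s, as for EBS gp3
	}
	fmt.Println(*vol.Iops, *vol.Throughput)
}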
type Volume struct { Size string `json:"size"` - StorageClass string `json:"storageClass"` + StorageClass string `json:"storageClass,omitempty"` SubPath string `json:"subPath,omitempty"` + Iops *int64 `json:"iops,omitempty"` + Throughput *int64 `json:"throughput,omitempty"` } +// AdditionalVolume specs additional optional volumes for statefulset type AdditionalVolume struct { Name string `json:"name"` MountPath string `json:"mountPath"` - SubPath string `json:"subPath"` + SubPath string `json:"subPath,omitempty"` TargetContainers []string `json:"targetContainers"` VolumeSource v1.VolumeSource `json:"volumeSource"` } @@ -124,7 +132,7 @@ type AdditionalVolume struct { // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values. type PostgresqlParam struct { PgVersion string `json:"version"` - Parameters map[string]string `json:"parameters"` + Parameters map[string]string `json:"parameters,omitempty"` } // ResourceDescription describes CPU and memory resources defined for a cluster. @@ -141,22 +149,23 @@ type Resources struct { // Patroni contains Patroni-specific configuration type Patroni struct { - InitDB map[string]string `json:"initdb"` - PgHba []string `json:"pg_hba"` - TTL uint32 `json:"ttl"` - LoopWait uint32 `json:"loop_wait"` - RetryTimeout uint32 `json:"retry_timeout"` - MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 - Slots map[string]map[string]string `json:"slots"` - SynchronousMode bool `json:"synchronous_mode"` - SynchronousModeStrict bool `json:"synchronous_mode_strict"` + InitDB map[string]string `json:"initdb,omitempty"` + PgHba []string `json:"pg_hba,omitempty"` + TTL uint32 `json:"ttl,omitempty"` + LoopWait uint32 `json:"loop_wait,omitempty"` + RetryTimeout uint32 `json:"retry_timeout,omitempty"` + MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 + Slots map[string]map[string]string `json:"slots,omitempty"` + SynchronousMode bool `json:"synchronous_mode,omitempty"` + SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"` } -//StandbyCluster +// StandbyDescription contains s3 wal path type StandbyDescription struct { S3WalPath string `json:"s3_wal_path,omitempty"` } +// TLSDescription specs TLS properties type TLSDescription struct { SecretName string `json:"secretName,omitempty"` CertificateFile string `json:"certificateFile,omitempty"` @@ -194,7 +203,7 @@ type PostgresStatus struct { PostgresClusterStatus string `json:"PostgresClusterStatus"` } -// Options for connection pooler +// ConnectionPooler Options for connection pooler // // TODO: prepared snippets of configuration, one can choose via type, e.g. 
// pgbouncer-large (with higher resources) or odyssey-small (with smaller diff --git a/pkg/apis/acid.zalan.do/v1/register.go b/pkg/apis/acid.zalan.do/v1/register.go index 1c30e35fb..9dcbf2baf 100644 --- a/pkg/apis/acid.zalan.do/v1/register.go +++ b/pkg/apis/acid.zalan.do/v1/register.go @@ -1,11 +1,10 @@ package v1 import ( + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" ) // APIVersion of the `postgresql` and `operator` CRDs @@ -44,6 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { // TODO: User uppercase CRDResourceKind of our types in the next major API version scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeam"), &PostgresTeam{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeamList"), &PostgresTeamList{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"), &OperatorConfiguration{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"), diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index db6efcd71..a795ec685 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -72,7 +72,7 @@ func extractClusterName(clusterName string, teamName string) (string, error) { func validateCloneClusterDescription(clone *CloneDescription) error { // when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name - if clone.ClusterName != "" && clone.EndTimestamp == "" { + if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" { if !serviceNameRegex.MatchString(clone.ClusterName) { return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q", serviceNameRegexString) diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index 28e9e8ca4..bf6875a82 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -163,7 +163,7 @@ var unmarshalCluster = []struct { "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), + marshal: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`), err: nil}, { about: "example with /status subresource", @@ -184,7 +184,7 @@ var unmarshalCluster = []struct { "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, { about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName", @@ -327,7 +327,7 @@ var unmarshalCluster = []struct { EndTime: mustParseTime("05:15"), }, }, - Clone: CloneDescription{ + Clone: &CloneDescription{ ClusterName: "acid-batman", }, ClusterName: "testcluster1", @@ -351,7 +351,7 @@ var unmarshalCluster = []struct { Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, Error: errors.New("name must match {TEAM}-{NAME} format").Error(), }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} 
,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, { about: "example with clone", @@ -366,7 +366,7 @@ var unmarshalCluster = []struct { }, Spec: PostgresSpec{ TeamID: "acid", - Clone: CloneDescription{ + Clone: &CloneDescription{ ClusterName: "team-batman", }, ClusterName: "testcluster1", @@ -405,7 +405,7 @@ var unmarshalCluster = []struct { err: errors.New("unexpected end of JSON input")}, { about: "expect error on JSON with field's value malformatted", - in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), out: Postgresql{}, marshal: []byte{}, err: errors.New("invalid character 'q' looking for beginning of value"), diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 5879c9b73..2f4104ce9 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -27,6 +27,7 @@ SOFTWARE. package v1 import ( + config "github.com/zalando/postgres-operator/pkg/util/config" corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -146,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { *out = *in + if in.SpiloRunAsUser != nil { + in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser + *out = new(int64) + **out = **in + } + if in.SpiloRunAsGroup != nil { + in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup + *out = new(int64) + **out = **in + } if in.SpiloFSGroup != nil { in, out := &in.SpiloFSGroup, &out.SpiloFSGroup *out = new(int64) @@ -168,6 +179,17 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura } out.OAuthTokenSecretName = in.OAuthTokenSecretName out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName + if in.InfrastructureRolesDefs != nil { + in, out := &in.InfrastructureRolesDefs, &out.InfrastructureRolesDefs + *out = make([]*config.InfrastructureRole, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(config.InfrastructureRole) + **out = **in + } + } + } if in.ClusterLabels != nil { in, out := &in.ClusterLabels, &out.ClusterLabels *out = make(map[string]string, len(*in)) @@ -180,6 +202,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura *out = make([]string, len(*in)) copy(*out, *in) } + if in.InheritedAnnotations != nil { + in, out := &in.InheritedAnnotations, &out.InheritedAnnotations + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.DownscalerAnnotations != nil { in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations *out = make([]string, len(*in)) @@ -502,7 +529,7 @@ func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { *out = *in in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam) - out.Volume = in.Volume + in.Volume.DeepCopyInto(&out.Volume) in.Patroni.DeepCopyInto(&out.Patroni) out.Resources = in.Resources if in.EnableConnectionPooler != nil { @@ -510,11 +537,26 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { *out = new(bool) **out = **in } + if in.EnableReplicaConnectionPooler != nil { + in, out := &in.EnableReplicaConnectionPooler, &out.EnableReplicaConnectionPooler + *out = new(bool) + **out = **in + } if in.ConnectionPooler != nil { in, out := &in.ConnectionPooler, &out.ConnectionPooler *out = new(ConnectionPooler) (*in).DeepCopyInto(*out) } + if in.SpiloRunAsUser != nil { + in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser + *out = new(int64) + **out = **in + } + if in.SpiloRunAsGroup != nil { + in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup + *out = new(int64) + **out = **in + } if in.SpiloFSGroup != nil { in, out := &in.SpiloFSGroup, &out.SpiloFSGroup *out = new(int64) @@ -567,7 +609,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Clone.DeepCopyInto(&out.Clone) + if in.Clone != nil { + in, out := &in.Clone, &out.Clone + *out = new(CloneDescription) + (*in).DeepCopyInto(*out) + } if in.Databases != nil { in, out := &in.Databases, &out.Databases *out = make(map[string]string, len(*in)) @@ -582,6 +628,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = *val.DeepCopy() } } + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations *out = 
make([]corev1.Toleration, len(*in)) @@ -675,6 +731,127 @@ func (in *PostgresStatus) DeepCopy() *PostgresStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeam) DeepCopyInto(out *PostgresTeam) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeam. +func (in *PostgresTeam) DeepCopy() *PostgresTeam { + if in == nil { + return nil + } + out := new(PostgresTeam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeam) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeamList) DeepCopyInto(out *PostgresTeamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresTeam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamList. +func (in *PostgresTeamList) DeepCopy() *PostgresTeamList { + if in == nil { + return nil + } + out := new(PostgresTeamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) { + *out = *in + if in.AdditionalSuperuserTeams != nil { + in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalTeams != nil { + in, out := &in.AdditionalTeams, &out.AdditionalTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalMembers != nil { + in, out := &in.AdditionalMembers, &out.AdditionalMembers + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamSpec. +func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec { + if in == nil { + return nil + } + out := new(PostgresTeamSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { *out = *in @@ -993,6 +1170,16 @@ func (in UserFlags) DeepCopy() UserFlags { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(int64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(int64) + **out = **in + } return } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 31b8fa155..42515a7c0 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -5,7 +5,6 @@ package cluster import ( "context" "database/sql" - "encoding/json" "fmt" "reflect" "regexp" @@ -13,21 +12,12 @@ import ( "sync" "time" - "github.com/r3labs/diff" "github.com/sirupsen/logrus" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - policybeta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" + pgteams "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -35,7 +25,17 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + "github.com/zalando/postgres-operator/pkg/util/volumes" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" ) var ( @@ -49,31 +49,17 @@ var ( type Config struct { OpConfig config.Config RestConfig *rest.Config + PgTeamMap pgteams.PostgresTeamMap InfrastructureRoles map[string]spec.PgUser // inherited from the controller PodServiceAccount *v1.ServiceAccount PodServiceAccountRoleBinding *rbacv1.RoleBinding } -// K8S objects that are belongs to a connection pooler -type ConnectionPoolerObjects struct { - Deployment *appsv1.Deployment - Service *v1.Service - - // It could happen that a connection pooler was enabled, but the operator - // was not able to properly process a corresponding event or was restarted. - // In this case we will miss missing/require situation and a lookup function - // will not be installed. To avoid synchronizing it all the time to prevent - // this, we can remember the result in memory at least until the next - // restart. 
- LookupFunction bool -} - type kubeResources struct { Services map[PostgresRole]*v1.Service Endpoints map[PostgresRole]*v1.Endpoints Secrets map[types.UID]*v1.Secret Statefulset *appsv1.StatefulSet - ConnectionPooler *ConnectionPoolerObjects PodDisruptionBudget *policybeta1.PodDisruptionBudget //Pods are treated separately //PVCs are treated separately @@ -103,7 +89,9 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - + ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects + EBSVolumes map[string]volumes.VolumeProperties + VolumeResizer volumes.VolumeResizer } type compareStatefulsetResult struct { @@ -125,6 +113,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil }) + passwordEncryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"] + if !ok { + passwordEncryption = "md5" + } cluster := &Cluster{ Config: cfg, @@ -136,7 +128,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres Secrets: make(map[types.UID]*v1.Secret), Services: make(map[PostgresRole]*v1.Service), Endpoints: make(map[PostgresRole]*v1.Endpoints)}, - userSyncStrategy: users.DefaultUserSyncStrategy{}, + userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: passwordEncryption}, deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}, podEventsQueue: podEventsQueue, KubeClient: kubeClient, @@ -146,6 +138,13 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName) cluster.patroni = patroni.New(cluster.logger) cluster.eventRecorder = eventRecorder + + cluster.EBSVolumes = make(map[string]volumes.VolumeProperties) + if cfg.OpConfig.StorageResizeMode != "pvc" || cfg.OpConfig.EnableEBSGp3Migration { + cluster.VolumeResizer = &volumes.EBSVolumeResizer{AWSRegion: cfg.OpConfig.AWSRegion} + + } + return cluster } @@ -181,34 +180,6 @@ func (c *Cluster) GetReference() *v1.ObjectReference { return ref } -// SetStatus of Postgres cluster -// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above -func (c *Cluster) setStatus(status string) { - var pgStatus acidv1.PostgresStatus - pgStatus.PostgresClusterStatus = status - - patch, err := json.Marshal(struct { - PgStatus interface{} `json:"status"` - }{&pgStatus}) - - if err != nil { - c.logger.Errorf("could not marshal status: %v", err) - } - - // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), - // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11) - // we should take advantage of it. - newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch( - context.TODO(), c.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status") - if err != nil { - c.logger.Errorf("could not update status: %v", err) - // return as newspec is empty, see PR654 - return - } - // update the spec, maintaining the new resourceVersion. 
- c.setSpec(newspec) -} - func (c *Cluster) isNewCluster() bool { return c.Status.Creating() } @@ -257,13 +228,13 @@ func (c *Cluster) Create() error { defer func() { if err == nil { - c.setStatus(acidv1.ClusterStatusRunning) //TODO: are you sure it's running? + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running? } else { - c.setStatus(acidv1.ClusterStatusAddFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed) } }() - c.setStatus(acidv1.ClusterStatusCreating) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating) c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources") if err = c.enforceMinResourceLimits(&c.Spec); err != nil { @@ -277,7 +248,7 @@ func (c *Cluster) Create() error { } if role == Master { // replica endpoint will be created by the replica service. Master endpoint needs to be created by us, - // since the corresponding master service doesn't define any selectors. + // since the corresponding master service does not define any selectors. ep, err = c.createEndpoint(role) if err != nil { return fmt.Errorf("could not create %s endpoint: %v", role, err) @@ -371,19 +342,7 @@ func (c *Cluster) Create() error { // // Do not consider connection pooler as a strict requirement, and if // something fails, report warning - if c.needConnectionPooler() { - if c.ConnectionPooler != nil { - c.logger.Warning("Connection pooler already exists in the cluster") - return nil - } - connectionPooler, err := c.createConnectionPooler(c.installLookupFunction) - if err != nil { - c.logger.Warningf("could not create connection pooler: %v", err) - return nil - } - c.logger.Infof("connection pooler %q has been successfully created", - util.NameFromMeta(connectionPooler.Deployment.ObjectMeta)) - } + c.createConnectionPooler(c.installLookupFunction) return nil } @@ -396,11 +355,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa //TODO: improve me if *c.Statefulset.Spec.Replicas != *statefulSet.Spec.Replicas { match = false - reasons = append(reasons, "new statefulset's number of replicas doesn't match the current one") + reasons = append(reasons, "new statefulset's number of replicas does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) { match = false - reasons = append(reasons, "new statefulset's annotations doesn't match the current one") + reasons = append(reasons, "new statefulset's annotations does not match the current one") } needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons) @@ -417,24 +376,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's serviceAccountName service account name doesn't match the current one") + reasons = append(reasons, "new statefulset's serviceAccountName service account name does not match the current one") } if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's 
terminationGracePeriodSeconds doesn't match the current one") + reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod affinity doesn't match the current one") + reasons = append(reasons, "new statefulset's pod affinity does not match the current one") } // Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's metadata labels doesn't match the current one") + reasons = append(reasons, "new statefulset's metadata labels does not match the current one") } if (c.Statefulset.Spec.Selector != nil) && (statefulSet.Spec.Selector != nil) { if !reflect.DeepEqual(c.Statefulset.Spec.Selector.MatchLabels, statefulSet.Spec.Selector.MatchLabels) { @@ -445,7 +404,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa return &compareStatefulsetResult{} } needsReplace = true - reasons = append(reasons, "new statefulset's selector doesn't match the current one") + reasons = append(reasons, "new statefulset's selector does not match the current one") } } @@ -453,7 +412,13 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod template metadata annotations doesn't match the current one") + reasons = append(reasons, "new statefulset's pod template metadata annotations does not match the current one") + } + if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) { + match = false + needsReplace = true + needsRollUpdate = true + reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one") } if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) { needsReplace = true @@ -464,26 +429,34 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa // Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta if name != statefulSet.Spec.VolumeClaimTemplates[i].Name { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d doesn't match the current one", i)) + reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) continue } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q doesn't match the current one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name)) } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q doesn't match the current 
one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name)) } } + // we assume any change in priority happens by rolling out a new priority class + // changing the priority value in an existing class is not supproted + if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName { + match = false + needsReplace = true + needsRollUpdate = true + reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one") + } + // lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image // until they are re-created for other reasons, for example node rotation if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) { needsReplace = true - needsRollUpdate = false - reasons = append(reasons, "lazy Spilo update: new statefulset's pod image doesn't match the current one") + reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one") } if needsRollUpdate || needsReplace { @@ -515,20 +488,22 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe } checks := []containerCheck{ - newCheck("new statefulset %s's %s (index %d) name doesn't match the current one", + newCheck("new statefulset %s's %s (index %d) name does not match the current one", func(a, b v1.Container) bool { return a.Name != b.Name }), - newCheck("new statefulset %s's %s (index %d) ports don't match the current one", + newCheck("new statefulset %s's %s (index %d) ports do not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }), - newCheck("new statefulset %s's %s (index %d) resources don't match the current ones", + newCheck("new statefulset %s's %s (index %d) resources do not match the current ones", func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }), - newCheck("new statefulset %s's %s (index %d) environment doesn't match the current one", + newCheck("new statefulset %s's %s (index %d) environment does not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }), - newCheck("new statefulset %s's %s (index %d) environment sources don't match the current one", + newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }), + newCheck("new statefulset %s's %s (index %d) security context does not match the current one", + func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }), } if !c.OpConfig.EnableLazySpiloUpgrade { - checks = append(checks, newCheck("new statefulset %s's %s (index %d) image doesn't match the current one", + checks = append(checks, newCheck("new statefulset %s's %s (index %d) image does not match the current one", func(a, b v1.Container) bool { return a.Image != b.Image })) } @@ -593,7 +568,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err) } if isSmaller { - c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) + 
c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) spec.Resources.ResourceLimits.CPU = minCPULimit } @@ -606,7 +581,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err) } if isSmaller { - c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) + c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) spec.Resources.ResourceLimits.Memory = minMemoryLimit } @@ -621,34 +596,40 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { // for a cluster that had no such job before. In this case a missing job is not an error. func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed := false + syncStatetfulSet := false c.mu.Lock() defer c.mu.Unlock() - c.setStatus(acidv1.ClusterStatusUpdating) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) c.setSpec(newSpec) defer func() { if updateFailed { - c.setStatus(acidv1.ClusterStatusUpdateFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed) } else { - c.setStatus(acidv1.ClusterStatusRunning) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) } }() - if oldSpec.Spec.PostgresqlParam.PgVersion != newSpec.Spec.PostgresqlParam.PgVersion { // PG versions comparison + logNiceDiff(c.logger, oldSpec, newSpec) + + if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion { c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) - //we need that hack to generate statefulset with the old version + // we need that hack to generate statefulset with the old version newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion + } else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion { + c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync", + oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) + syncStatetfulSet = true } // Service if !reflect.DeepEqual(c.generateService(Master, &oldSpec.Spec), c.generateService(Master, &newSpec.Spec)) || !reflect.DeepEqual(c.generateService(Replica, &oldSpec.Spec), c.generateService(Replica, &newSpec.Spec)) { - c.logger.Debugf("syncing services") if err := c.syncServices(); err != nil { c.logger.Errorf("could not sync services: %v", err) updateFailed = true @@ -659,7 +640,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // initUsers. Check if it needs to be called. 
sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) && reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) - needConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec) + needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) || + needReplicaConnectionPoolerWorker(&newSpec.Spec) if !sameUsers || needConnectionPooler { c.logger.Debugf("syncing secrets") if err := c.initUsers(); err != nil { @@ -678,13 +660,21 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // Volume if oldSpec.Spec.Size != newSpec.Spec.Size { - c.logger.Debugf("syncing persistent volumes") c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume) - - if err := c.syncVolumes(); err != nil { - c.logger.Errorf("could not sync persistent volumes: %v", err) - updateFailed = true + c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode) + if c.OpConfig.StorageResizeMode == "pvc" { + if err := c.syncVolumeClaims(); err != nil { + c.logger.Errorf("could not sync persistent volume claims: %v", err) + updateFailed = true + } + } else if c.OpConfig.StorageResizeMode == "ebs" { + if err := c.syncVolumes(); err != nil { + c.logger.Errorf("could not sync persistent volumes: %v", err) + updateFailed = true + } } + } else { + c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") } // Statefulset @@ -711,8 +701,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed = true return } - if !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) { + if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) { c.logger.Debugf("syncing statefulsets") + syncStatefulSet = false // TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet if err := c.syncStatefulSet(); err != nil { c.logger.Errorf("could not sync statefulsets: %v", err) @@ -791,9 +782,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } } - // sync connection pooler - if _, err := c.syncConnectionPooler(oldSpec, newSpec, - c.installLookupFunction); err != nil { + // Sync connection pooler. Before actually doing sync reset lookup + // installation flag, since manifest updates could add another db which we + // need to process. In the future we may want to do this more carefully and + // check which databases we need to process, but even repeating the whole + // installation process should be good enough. + + if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil { c.logger.Errorf("could not sync connection pooler: %v", err) updateFailed = true } @@ -801,6 +796,20 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { return nil } +func syncResources(a, b *v1.ResourceRequirements) bool { + for _, res := range []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceMemory, + } { + if !a.Limits[res].Equal(b.Limits[res]) || + !a.Requests[res].Equal(b.Requests[res]) { + return true + } + } + + return false +} + // Delete deletes the cluster and cleans up all objects associated with it (including statefulsets). // The deletion order here is somewhat significant, because Patroni, when running with the Kubernetes // DCS, reuses the master's endpoint to store the leader related metadata.
If we remove the endpoint @@ -821,14 +830,8 @@ func (c *Cluster) Delete() { c.logger.Warningf("could not delete statefulset: %v", err) } - for _, obj := range c.Secrets { - if doDelete, user := c.shouldDeleteSecret(obj); !doDelete { - c.logger.Warningf("not removing secret %q for the system user %q", obj.GetName(), user) - continue - } - if err := c.deleteSecret(obj); err != nil { - c.logger.Warningf("could not delete secret: %v", err) - } + if err := c.deleteSecrets(); err != nil { + c.logger.Warningf("could not delete secrets: %v", err) } if err := c.deletePodDisruptionBudget(); err != nil { @@ -855,9 +858,12 @@ func (c *Cluster) Delete() { // Delete connection pooler objects anyway, even if it's not mentioned in the // manifest, just to not keep orphaned components in case if something went // wrong - if err := c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) + for _, role := range [2]PostgresRole{Master, Replica} { + if err := c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } } + } //NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status). @@ -927,7 +933,7 @@ func (c *Cluster) initSystemUsers() { // Connection pooler user is an exception, if requested it's going to be // created by operator as a normal pgUser - if c.needConnectionPooler() { + if needConnectionPooler(&c.Spec) { // initialize empty connection pooler if not done yet if c.Spec.ConnectionPooler == nil { c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{} @@ -985,32 +991,42 @@ func (c *Cluster) initPreparedDatabaseRoles() error { } for preparedDbName, preparedDB := range c.Spec.PreparedDatabases { + // get list of prepared schemas to set in search_path + preparedSchemas := preparedDB.PreparedSchemas + if len(preparedDB.PreparedSchemas) == 0 { + preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} + } + + var searchPath strings.Builder + searchPath.WriteString(constants.DefaultSearchPath) + for preparedSchemaName := range preparedSchemas { + searchPath.WriteString(", " + preparedSchemaName) + } + // default roles per database - if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName); err != nil { + if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil { return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) } if preparedDB.DefaultUsers { - if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName); err != nil { + if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil { return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) } } // default roles per database schema - preparedSchemas := preparedDB.PreparedSchemas - if len(preparedDB.PreparedSchemas) == 0 { - preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} - } for preparedSchemaName, preparedSchema := range preparedSchemas { if preparedSchema.DefaultRoles == nil || *preparedSchema.DefaultRoles { if err := c.initDefaultRoles(defaultRoles, preparedDbName+constants.OwnerRoleNameSuffix, - preparedDbName+"_"+preparedSchemaName); err != nil { + preparedDbName+"_"+preparedSchemaName, + constants.DefaultSearchPath+", "+preparedSchemaName); err != nil { return fmt.Errorf("could not initialize default roles for database schema %s: %v", 
preparedSchemaName, err) } if preparedSchema.DefaultUsers { if err := c.initDefaultRoles(defaultUsers, preparedDbName+constants.OwnerRoleNameSuffix, - preparedDbName+"_"+preparedSchemaName); err != nil { + preparedDbName+"_"+preparedSchemaName, + constants.DefaultSearchPath+", "+preparedSchemaName); err != nil { return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err) } } @@ -1020,7 +1036,7 @@ func (c *Cluster) initPreparedDatabaseRoles() error { return nil } -func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string) error { +func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error { for defaultRole, inherits := range defaultRoles { @@ -1044,12 +1060,13 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix } newRole := spec.PgUser{ - Origin: spec.RoleOriginBootstrap, - Name: roleName, - Password: util.RandomPassword(constants.PasswordLength), - Flags: flags, - MemberOf: memberOf, - AdminRole: adminRole, + Origin: spec.RoleOriginBootstrap, + Name: roleName, + Password: util.RandomPassword(constants.PasswordLength), + Flags: flags, + MemberOf: memberOf, + Parameters: map[string]string{"search_path": searchPath}, + AdminRole: adminRole, } if currentRole, present := c.pgUsers[roleName]; present { c.pgUsers[roleName] = c.resolveNameConflict(¤tRole, &newRole) @@ -1107,7 +1124,7 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e if c.shouldAvoidProtectedOrSystemRole(username, "API role") { continue } - if c.OpConfig.EnableTeamSuperuser || isPostgresSuperuserTeam { + if (c.OpConfig.EnableTeamSuperuser && teamID == c.Spec.TeamID) || isPostgresSuperuserTeam { flags = append(flags, constants.RoleFlagSuperuser) } else { if c.OpConfig.TeamAdminRole != "" { @@ -1136,17 +1153,38 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e func (c *Cluster) initHumanUsers() error { var clusterIsOwnedBySuperuserTeam bool + superuserTeams := []string{} + + if c.OpConfig.EnablePostgresTeamCRDSuperusers { + superuserTeams = c.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true) + } for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams { - err := c.initTeamMembers(postgresSuperuserTeam, true) - if err != nil { - return fmt.Errorf("Cannot create a team %q of Postgres superusers: %v", postgresSuperuserTeam, err) + if !(util.SliceContains(superuserTeams, postgresSuperuserTeam)) { + superuserTeams = append(superuserTeams, postgresSuperuserTeam) } - if postgresSuperuserTeam == c.Spec.TeamID { + } + + for _, superuserTeam := range superuserTeams { + err := c.initTeamMembers(superuserTeam, true) + if err != nil { + return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err) + } + if superuserTeam == c.Spec.TeamID { clusterIsOwnedBySuperuserTeam = true } } + additionalTeams := c.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true) + for _, additionalTeam := range additionalTeams { + if !(util.SliceContains(superuserTeams, additionalTeam)) { + err := c.initTeamMembers(additionalTeam, false) + if err != nil { + return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err) + } + } + } + if clusterIsOwnedBySuperuserTeam { c.logger.Infof("Team %q owning the cluster is also a team of superusers. 
Created superuser roles for its members instead of admin roles.", c.Spec.TeamID) return nil @@ -1154,7 +1192,7 @@ func (c *Cluster) initHumanUsers() error { err := c.initTeamMembers(c.Spec.TeamID, false) if err != nil { - return fmt.Errorf("Cannot create a team %q of admins owning the PG cluster: %v", c.Spec.TeamID, err) + return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err) } return nil @@ -1300,11 +1338,6 @@ func (c *Cluster) Unlock() { c.mu.Unlock() } -func (c *Cluster) shouldDeleteSecret(secret *v1.Secret) (delete bool, userName string) { - secretUser := string(secret.Data["username"]) - return (secretUser != c.OpConfig.ReplicationUsername && secretUser != c.OpConfig.SuperUsername), secretUser -} - type simpleActionWithResult func() error type clusterObjectGet func(name string) (spec.NamespacedName, error) @@ -1345,7 +1378,7 @@ func (c *Cluster) deleteClusterObject( objType, namespacedName) if err = del(name); err != nil { - return fmt.Errorf("could not Patroni delete cluster object %q with name %q: %v", + return fmt.Errorf("could not delete Patroni cluster object %q with name %q: %v", objType, namespacedName, err) } @@ -1395,119 +1428,3 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error { return c.deleteClusterObject(get, deleteConfigMapFn, "configmap") } - -// Test if two connection pooler configuration needs to be synced. For simplicity -// compare not the actual K8S objects, but the configuration itself and request -// sync if there is any difference. -func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) { - reasons = []string{} - sync = false - - changelog, err := diff.Diff(oldSpec, newSpec) - if err != nil { - c.logger.Infof("Cannot get diff, do not do anything, %+v", err) - return false, reasons - } - - if len(changelog) > 0 { - sync = true - } - - for _, change := range changelog { - msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", - change.Type, change.Path, change.From, change.To) - reasons = append(reasons, msg) - } - - return sync, reasons -} - -func syncResources(a, b *v1.ResourceRequirements) bool { - for _, res := range []v1.ResourceName{ - v1.ResourceCPU, - v1.ResourceMemory, - } { - if !a.Limits[res].Equal(b.Limits[res]) || - !a.Requests[res].Equal(b.Requests[res]) { - return true - } - } - - return false -} - -// Check if we need to synchronize connection pooler deployment due to new -// defaults, that are different from what we see in the DeploymentSpec -func (c *Cluster) needSyncConnectionPoolerDefaults( - spec *acidv1.ConnectionPooler, - deployment *appsv1.Deployment) (sync bool, reasons []string) { - - reasons = []string{} - sync = false - - config := c.OpConfig.ConnectionPooler - podTemplate := deployment.Spec.Template - poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] - - if spec == nil { - spec = &acidv1.ConnectionPooler{} - } - - if spec.NumberOfInstances == nil && - *deployment.Spec.Replicas != *config.NumberOfInstances { - - sync = true - msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", - *deployment.Spec.Replicas, *config.NumberOfInstances) - reasons = append(reasons, msg) - } - - if spec.DockerImage == "" && - poolerContainer.Image != config.Image { - - sync = true - msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", - poolerContainer.Image, config.Image) - reasons = append(reasons, msg) - } - - expectedResources, err := 
generateResourceRequirements(spec.Resources, - c.makeDefaultConnectionPoolerResources()) - - // An error to generate expected resources means something is not quite - // right, but for the purpose of robustness do not panic here, just report - // and ignore resources comparison (in the worst case there will be no - // updates for new resource values). - if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { - sync = true - msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", - poolerContainer.Resources, expectedResources) - reasons = append(reasons, msg) - } - - if err != nil { - c.logger.Warningf("Cannot generate expected resources, %v", err) - } - - for _, env := range poolerContainer.Env { - if spec.User == "" && env.Name == "PGUSER" { - ref := env.ValueFrom.SecretKeyRef.LocalObjectReference - - if ref.Name != c.credentialSecretName(config.User) { - sync = true - msg := fmt.Sprintf("pooler user is different (having %s, required %s)", - ref.Name, config.User) - reasons = append(reasons, msg) - } - } - - if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { - sync = true - msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", - env.Value, config.Schema) - reasons = append(reasons, msg) - } - } - - return sync, reasons -} diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 4562d525e..1f6510e65 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -12,7 +12,6 @@ import ( "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/teams" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" ) @@ -334,36 +333,6 @@ func TestInitHumanUsersWithSuperuserTeams(t *testing.T) { } } -func TestShouldDeleteSecret(t *testing.T) { - testName := "TestShouldDeleteSecret" - - tests := []struct { - secret *v1.Secret - outcome bool - }{ - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte("foobar")}}, - outcome: true, - }, - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte(superUserName)}}, - - outcome: false, - }, - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte(replicationUserName)}}, - outcome: false, - }, - } - - for _, tt := range tests { - if outcome, username := cl.shouldDeleteSecret(tt.secret); outcome != tt.outcome { - t.Errorf("%s expects the check for deletion of the username %q secret to return %t, got %t", - testName, username, tt.outcome, outcome) - } - } -} - func TestPodAnnotations(t *testing.T) { testName := "TestPodAnnotations" tests := []struct { diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go new file mode 100644 index 000000000..1d1d609e4 --- /dev/null +++ b/pkg/cluster/connection_pooler.go @@ -0,0 +1,905 @@ +package cluster + +import ( + "context" + "fmt" + "strings" + + "github.com/r3labs/diff" + "github.com/sirupsen/logrus" + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + 
"github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" +) + +// ConnectionPoolerObjects K8s objects that are belong to connection pooler +type ConnectionPoolerObjects struct { + Deployment *appsv1.Deployment + Service *v1.Service + Name string + ClusterName string + Namespace string + Role PostgresRole + // It could happen that a connection pooler was enabled, but the operator + // was not able to properly process a corresponding event or was restarted. + // In this case we will miss missing/require situation and a lookup function + // will not be installed. To avoid synchronizing it all the time to prevent + // this, we can remember the result in memory at least until the next + // restart. + LookupFunction bool + // Careful with referencing cluster.spec this object pointer changes + // during runtime and lifetime of cluster +} + +func (c *Cluster) connectionPoolerName(role PostgresRole) string { + name := c.Name + "-pooler" + if role == Replica { + name = name + "-repl" + } + return name +} + +// isConnectionPoolerEnabled +func needConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) || + needReplicaConnectionPoolerWorker(spec) +} + +func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) +} + +func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || + (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) +} + +func needReplicaConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needReplicaConnectionPoolerWorker(spec) +} + +func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return spec.EnableReplicaConnectionPooler != nil && + *spec.EnableReplicaConnectionPooler +} + +// Return connection pooler labels selector, which should from one point of view +// inherit most of the labels from the cluster itself, but at the same time +// have e.g. different `application` label, so that recreatePod operation will +// not interfere with it (it lists all the pods via labels, and if there would +// be no difference, it will recreate also pooler pods). +func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector { + poolerLabels := c.labelsSet(addExtraLabels) + + // TODO should be config values + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = c.connectionPoolerName(role) + + if addExtraLabels { + extraLabels := map[string]string{} + extraLabels[c.OpConfig.PodRoleLabel] = string(role) + + poolerLabels = labels.Merge(poolerLabels, extraLabels) + } + + return &metav1.LabelSelector{ + MatchLabels: poolerLabels, + MatchExpressions: nil, + } +} + +// Prepare the database for connection pooler to be used, i.e. install lookup +// function (do it first, because it should be fast and if it didn't succeed, +// it doesn't makes sense to create more K8S objects. At this moment we assume +// that necessary connection pooler user exists. +// +// After that create all the objects for connection pooler, namely a deployment +// with a chosen pooler and a service to expose it. 
+
+// Prepare the database for the connection pooler to be used, i.e. install the
+// lookup function (do it first, because it should be fast, and if it does not
+// succeed it doesn't make sense to create more K8s objects). At this moment we
+// assume that the necessary connection pooler user exists.
+//
+// After that, create all the objects for the connection pooler, namely a
+// deployment with the chosen pooler and a service to expose it. The pooler
+// name is kept in the ConnectionPoolerObjects structure so that it stays
+// immutable.
+func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncReason, error) {
+	var reason SyncReason
+	c.setProcessName("creating connection pooler")
+
+	// this is essentially a sync with nil as the old spec
+	if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, LookupFunction); err != nil {
+		return reason, err
+	}
+	return reason, nil
+}
+
+//
+// Generate pool size related environment variables.
+//
+// MAX_DB_CONN specifies the global maximum for connections to a target
+// database.
+//
+// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough.
+//
+// DEFAULT_SIZE is the pool size per db/user (having in mind the use case when
+// most of the queries coming through the connection pooler are from the same
+// user to the same db). In case we want to spin up more connection pooler
+// instances, take this into account and maintain the same total number of
+// connections.
+//
+// MIN_SIZE is the pool's minimal size, to prevent a situation where a sudden
+// workload has to wait for new connections to be spun up.
+//
+// RESERVE_SIZE is how many additional connections to allow for a pooler.
+func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
+	spec := &c.Spec
+	effectiveMode := util.Coalesce(
+		spec.ConnectionPooler.Mode,
+		c.OpConfig.ConnectionPooler.Mode)
+
+	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
+	if numberOfInstances == nil {
+		numberOfInstances = util.CoalesceInt32(
+			c.OpConfig.ConnectionPooler.NumberOfInstances,
+			k8sutil.Int32ToPointer(1))
+	}
+
+	effectiveMaxDBConn := util.CoalesceInt32(
+		spec.ConnectionPooler.MaxDBConnections,
+		c.OpConfig.ConnectionPooler.MaxDBConnections)
+
+	if effectiveMaxDBConn == nil {
+		effectiveMaxDBConn = k8sutil.Int32ToPointer(
+			constants.ConnectionPoolerMaxDBConnections)
+	}
+
+	maxDBConn := *effectiveMaxDBConn / *numberOfInstances
+
+	defaultSize := maxDBConn / 2
+	minSize := defaultSize / 2
+	reserveSize := minSize
+
+	return []v1.EnvVar{
+		{
+			Name:  "CONNECTION_POOLER_PORT",
+			Value: fmt.Sprint(pgPort),
+		},
+		{
+			Name:  "CONNECTION_POOLER_MODE",
+			Value: effectiveMode,
+		},
+		{
+			Name:  "CONNECTION_POOLER_DEFAULT_SIZE",
+			Value: fmt.Sprint(defaultSize),
+		},
+		{
+			Name:  "CONNECTION_POOLER_MIN_SIZE",
+			Value: fmt.Sprint(minSize),
+		},
+		{
+			Name:  "CONNECTION_POOLER_RESERVE_SIZE",
+			Value: fmt.Sprint(reserveSize),
+		},
+		{
+			Name:  "CONNECTION_POOLER_MAX_CLIENT_CONN",
+			Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections),
+		},
+		{
+			Name:  "CONNECTION_POOLER_MAX_DB_CONN",
+			Value: fmt.Sprint(maxDBConn),
+		},
+	}
+}
+
+func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
+	*v1.PodTemplateSpec, error) {
+	spec := &c.Spec
+	gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
+	resources, err := generateResourceRequirements(
+		spec.ConnectionPooler.Resources,
+		makeDefaultConnectionPoolerResources(&c.OpConfig))
+
+	effectiveDockerImage := util.Coalesce(
+		spec.ConnectionPooler.DockerImage,
+		c.OpConfig.ConnectionPooler.Image)
+
+	effectiveSchema := util.Coalesce(
+		spec.ConnectionPooler.Schema,
+		c.OpConfig.ConnectionPooler.Schema)
+
+	if err != nil {
+		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
+	}
+
+	secretSelector := func(key string) *v1.SecretKeySelector {
+		effectiveUser := util.Coalesce(
+			spec.ConnectionPooler.User,
+
c.OpConfig.ConnectionPooler.User) + + return &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.credentialSecretName(effectiveUser), + }, + Key: key, + } + } + + envVars := []v1.EnvVar{ + { + Name: "PGHOST", + Value: c.serviceAddress(role), + }, + { + Name: "PGPORT", + Value: c.servicePort(role), + }, + { + Name: "PGUSER", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("username"), + }, + }, + // the convention is to use the same schema name as + // connection pooler username + { + Name: "PGSCHEMA", + Value: effectiveSchema, + }, + { + Name: "PGPASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("password"), + }, + }, + } + envVars = append(envVars, c.getConnectionPoolerEnvVars()...) + + poolerContainer := v1.Container{ + Name: connectionPoolerContainer, + Image: effectiveDockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + Ports: []v1.ContainerPort{ + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + }, + Env: envVars, + ReadinessProbe: &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.IntOrString{IntVal: pgPort}, + }, + }, + }, + } + + podTemplate := &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: c.connectionPoolerLabels(role, true).MatchLabels, + Namespace: c.Namespace, + Annotations: c.annotationsSet(c.generatePodAnnotations(spec)), + }, + Spec: v1.PodSpec{ + ServiceAccountName: c.OpConfig.PodServiceAccountName, + TerminationGracePeriodSeconds: &gracePeriod, + Containers: []v1.Container{poolerContainer}, + // TODO: add tolerations to scheduler pooler on the same node + // as database + //Tolerations: *tolerationsSpec, + }, + } + + return podTemplate, nil +} + +func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *ConnectionPoolerObjects) ( + *appsv1.Deployment, error) { + spec := &c.Spec + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: connectionPooler.Name, + Namespace: connectionPooler.Namespace, + Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels, + Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), + // make StatefulSet object its owner to represent the dependency. 
+			// By itself the StatefulSet is deleted with the "Orphaned"
+			// propagation policy, which means that its deletion will not
+			// clean up this deployment, but the hope is that this object
+			// will be garbage collected if something went wrong and the
+			// operator didn't delete it.
+			OwnerReferences: c.ownerReferences(),
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: numberOfInstances,
+			Selector: c.connectionPoolerLabels(connectionPooler.Role, false),
+			Template: *podTemplate,
+		},
+	}
+
+	return deployment, nil
+}
+
+func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {
+
+	spec := &c.Spec
+	// There are two ways to enable a connection pooler: either to specify a
+	// connectionPooler section or enableConnectionPooler. In the second case
+	// spec.connectionPooler will be nil, so to make it easier to calculate
+	// default values, initialize it to an empty structure. It could be done
+	// anywhere, but here is the earliest common entry point between the sync
+	// and create code, so init here.
+	if spec.ConnectionPooler == nil {
+		spec.ConnectionPooler = &acidv1.ConnectionPooler{}
+	}
+
+	serviceSpec := v1.ServiceSpec{
+		Ports: []v1.ServicePort{
+			{
+				Name:       connectionPooler.Name,
+				Port:       pgPort,
+				TargetPort: intstr.IntOrString{StrVal: c.servicePort(connectionPooler.Role)},
+			},
+		},
+		Type: v1.ServiceTypeClusterIP,
+		Selector: map[string]string{
+			"connection-pooler": c.connectionPoolerName(connectionPooler.Role),
+		},
+	}
+
+	service := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        connectionPooler.Name,
+			Namespace:   connectionPooler.Namespace,
+			Labels:      c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
+			Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)),
+			// Make the StatefulSet object its owner to represent the
+			// dependency. By itself the StatefulSet is deleted with the
+			// "Orphaned" propagation policy, which means that its deletion
+			// will not clean up this service, but the hope is that this
+			// object will be garbage collected if something went wrong and
+			// the operator didn't delete it.
+			OwnerReferences: c.ownerReferences(),
+		},
+		Spec: serviceSpec,
+	}
+
+	return service
+}
+
+// deleteConnectionPooler deletes the pooler deployment and service for the
+// given role
+func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
+	c.logger.Infof("deleting connection pooler spilo-role=%s", role)
+
+	// Lack of connection pooler objects is not a fatal error; just log it if
+	// one was present in the manifest before
+	if c.ConnectionPooler[role] == nil || role == "" {
+		c.logger.Debugf("no connection pooler to delete")
+		return nil
+	}
+
+	// Clean up the deployment object. If the deployment resource we've
+	// remembered is somehow empty, try to delete based on what we would
+	// generate
+	var deployment *appsv1.Deployment
+	deployment = c.ConnectionPooler[role].Deployment
+
+	policy := metav1.DeletePropagationForeground
+	options := metav1.DeleteOptions{PropagationPolicy: &policy}
+
+	if deployment != nil {
+
+		// set the delete propagation policy to foreground, so that the
+		// replica set is deleted as well
+
+		err = c.KubeClient.
+			Deployments(c.Namespace).
+			Delete(context.TODO(), deployment.Name, options)
+
+		if k8sutil.ResourceNotFound(err) {
+			c.logger.Debugf("connection pooler deployment was already deleted")
+		} else if err != nil {
+			return fmt.Errorf("could not delete connection pooler deployment: %v", err)
+		}
+
+		c.logger.Infof("connection pooler deployment %s has been deleted for role %s", deployment.Name, role)
+	}
+
+	// Repeat the same for the service object
+	var service *v1.Service
+	service = c.ConnectionPooler[role].Service
+	if service == nil {
+		c.logger.Debugf("no connection pooler service object to delete")
+	} else {
+
+		err = c.KubeClient.
+			Services(c.Namespace).
+			Delete(context.TODO(), service.Name, options)
+
+		if k8sutil.ResourceNotFound(err) {
+			c.logger.Debugf("connection pooler service was already deleted")
+		} else if err != nil {
+			return fmt.Errorf("could not delete connection pooler service: %v", err)
+		}
+
+		c.logger.Infof("connection pooler service %s has been deleted for role %s", service.Name, role)
+	}
+
+	c.ConnectionPooler[role].Deployment = nil
+	c.ConnectionPooler[role].Service = nil
+	return nil
+}
+
+// deleteConnectionPoolerSecret deletes the secret used by the connection
+// pooler
+func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
+	secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
+
+	secret, err := c.KubeClient.
+		Secrets(c.Namespace).
+		Get(context.TODO(), secretName, metav1.GetOptions{})
+
+	if err != nil {
+		c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err)
+	} else {
+		if err = c.deleteSecret(secret.UID, *secret); err != nil {
+			return fmt.Errorf("could not delete pooler secret: %v", err)
+		}
+	}
+	return nil
+}
+
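The patch helper that follows sends a Kubernetes merge patch built by specPatch. Its definition lives elsewhere in the operator; assuming it marshals the new deployment spec under a top-level "spec" key, the request for, say, a replica-count change reduces to the sketch below (client, names and values are illustrative, and the real body carries the full spec rather than a single field):

    patch := []byte(`{"spec": {"replicas": 2}}`)
    _, err := kubeClient.Deployments("default").Patch(
    	context.TODO(), "acid-minimal-cluster-pooler", types.MergePatchType,
    	patch, metav1.PatchOptions{}, "")
    // err is returned to the caller in the real helper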
+// Perform the actual patching of a connection pooler deployment, assuming
+// that all the checks were already done before.
+func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+	if newDeployment == nil {
+		return nil, fmt.Errorf("there is no connection pooler in the cluster")
+	}
+
+	patchData, err := specPatch(newDeployment.Spec)
+	if err != nil {
+		return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err)
+	}
+
+	// An update probably requires RetryOnConflict, but since only one operator
+	// worker at a time will try to update it, the chances of conflicts are
+	// minimal.
+	deployment, err := KubeClient.
+		Deployments(newDeployment.Namespace).Patch(
+		context.TODO(),
+		newDeployment.Name,
+		types.MergePatchType,
+		patchData,
+		metav1.PatchOptions{},
+		"")
+	if err != nil {
+		return nil, fmt.Errorf("could not patch connection pooler deployment: %v", err)
+	}
+
+	return deployment, nil
+}
+
+// updateConnectionPoolerAnnotations updates the annotations of the connection
+// pooler deployment
+func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) {
+	patchData, err := metaAnnotationsPatch(annotations)
+	if err != nil {
+		return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err)
+	}
+	result, err := KubeClient.Deployments(deployment.Namespace).Patch(
+		context.TODO(),
+		deployment.Name,
+		types.MergePatchType,
+		[]byte(patchData),
+		metav1.PatchOptions{},
+		"")
+	if err != nil {
+		return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err)
+	}
+	return result, nil
+
+}
+
+// Test if two connection pooler configurations need to be synced. For
+// simplicity, compare not the actual K8s objects but the configuration itself,
+// and request a sync if there is any difference.
+func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
+	reasons = []string{}
+	sync = false
+
+	changelog, err := diff.Diff(oldSpec, newSpec)
+	if err != nil {
+		//c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
+		return false, reasons
+	}
+
+	if len(changelog) > 0 {
+		sync = true
+	}
+
+	for _, change := range changelog {
+		msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'",
+			change.Type, change.Path, change.From, change.To)
+		reasons = append(reasons, msg)
+	}
+
+	return sync, reasons
+}
+
+// Check if we need to synchronize the connection pooler deployment because of
+// new defaults that differ from what we see in the DeploymentSpec
+func needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) {
+
+	reasons = []string{}
+	sync = false
+
+	config := Config.OpConfig.ConnectionPooler
+	podTemplate := deployment.Spec.Template
+	poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer]
+
+	if spec == nil {
+		spec = &acidv1.ConnectionPooler{}
+	}
+	if spec.NumberOfInstances == nil &&
+		*deployment.Spec.Replicas != *config.NumberOfInstances {
+
+		sync = true
+		msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)",
+			*deployment.Spec.Replicas, *config.NumberOfInstances)
+		reasons = append(reasons, msg)
+	}
+
+	if spec.DockerImage == "" &&
+		poolerContainer.Image != config.Image {
+
+		sync = true
+		msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
+			poolerContainer.Image, config.Image)
+		reasons = append(reasons, msg)
+	}
+
+	expectedResources, err := generateResourceRequirements(spec.Resources,
+		makeDefaultConnectionPoolerResources(&Config.OpConfig))
+
+	// An error generating the expected resources means something is not quite
+	// right, but for the purpose of robustness do not panic here; just report
+	// it and skip the resources comparison (in the worst case there will be no
+	// updates for new resource values).
+ if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + sync = true + msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", + poolerContainer.Resources, expectedResources) + reasons = append(reasons, msg) + } + + if err != nil { + return false, reasons + } + + for _, env := range poolerContainer.Env { + if spec.User == "" && env.Name == "PGUSER" { + ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + secretName := Config.OpConfig.SecretNameTemplate.Format( + "username", strings.Replace(config.User, "_", "-", -1), + "cluster", deployment.ClusterName, + "tprkind", acidv1.PostgresCRDResourceKind, + "tprgroup", acidzalando.GroupName) + + if ref.Name != secretName { + sync = true + msg := fmt.Sprintf("pooler user is different (having %s, required %s)", + ref.Name, config.User) + reasons = append(reasons, msg) + } + } + + if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { + sync = true + msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", + env.Value, config.Schema) + reasons = append(reasons, msg) + } + } + + return sync, reasons +} + +// Generate default resource section for connection pooler deployment, to be +// used if nothing custom is specified in the manifest +func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources { + + defaultRequests := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + } + + return acidv1.Resources{ + ResourceRequests: defaultRequests, + ResourceLimits: defaultLimits, + } +} + +func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) { + var v []string + + var input []*bool + if oldSpec == nil { + input = []*bool{nil, nil, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} + } else { + input = []*bool{oldSpec.Spec.EnableConnectionPooler, oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} + } + + for _, b := range input { + if b == nil { + v = append(v, "nil") + } else { + v = append(v, fmt.Sprintf("%v", *b)) + } + } + + log.Debugf("syncing connection pooler from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3]) +} + +func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) { + logPoolerEssentials(c.logger, oldSpec, newSpec) + + var reason SyncReason + var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool + oldNeedConnectionPooler = false + + // Check and perform the sync requirements for each of the roles. 
+	for _, role := range [2]PostgresRole{Master, Replica} {
+
+		if role == Master {
+			newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec)
+			if oldSpec != nil {
+				oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec)
+			}
+		} else {
+			newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec)
+			if oldSpec != nil {
+				oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec)
+			}
+		}
+
+		// if the call is via createConnectionPooler, then it is required to initialize
+		// the structure
+		if c.ConnectionPooler == nil {
+			c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
+		}
+		if c.ConnectionPooler[role] == nil {
+			c.ConnectionPooler[role] = &ConnectionPoolerObjects{
+				Deployment:     nil,
+				Service:        nil,
+				Name:           c.connectionPoolerName(role),
+				ClusterName:    c.ClusterName,
+				Namespace:      c.Namespace,
+				LookupFunction: false,
+				Role:           role,
+			}
+		}
+
+		if newNeedConnectionPooler {
+			// Try to sync in any case. If we didn't need a connection pooler
+			// before, it means we want to create it. If it was already present,
+			// still sync, since it could happen that there is no difference in
+			// the specs and all the resources are remembered, but the deployment
+			// was manually deleted in between.
+
+			// In this case, also do not forget to install the lookup function,
+			// as when creating the cluster.
+			if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction {
+				newConnectionPooler := newSpec.Spec.ConnectionPooler
+
+				specSchema := ""
+				specUser := ""
+
+				if newConnectionPooler != nil {
+					specSchema = newConnectionPooler.Schema
+					specUser = newConnectionPooler.User
+				}
+
+				schema := util.Coalesce(
+					specSchema,
+					c.OpConfig.ConnectionPooler.Schema)
+
+				user := util.Coalesce(
+					specUser,
+					c.OpConfig.ConnectionPooler.User)
+
+				if err = LookupFunction(schema, user, role); err != nil {
+					return NoSync, err
+				}
+			}
+
+			if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil {
+				c.logger.Errorf("could not sync connection pooler: %v", err)
+				return reason, err
+			}
+		} else {
+			// delete and clean up the resources if they are already detected
+			if c.ConnectionPooler[role] != nil &&
+				(c.ConnectionPooler[role].Deployment != nil ||
+					c.ConnectionPooler[role].Service != nil) {
+
+				if err = c.deleteConnectionPooler(role); err != nil {
+					c.logger.Warningf("could not remove connection pooler: %v", err)
+				}
+			}
+		}
+	}
+	if !needMasterConnectionPoolerWorker(&newSpec.Spec) &&
+		!needReplicaConnectionPoolerWorker(&newSpec.Spec) {
+		if err = c.deleteConnectionPoolerSecret(); err != nil {
+			c.logger.Warningf("could not remove connection pooler secret: %v", err)
+		}
+	}
+
+	return reason, nil
+}
+
+// Synchronize connection pooler resources. Effectively we are only interested
+// in synchronizing the corresponding deployment, but if the deployment or the
+// service is missing, create it. After checking, also remember the objects for
+// future reference.
+func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) (
+	SyncReason, error) {
+
+	deployment, err := c.KubeClient.
+		Deployments(c.Namespace).
+ Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Deployment = deployment + } else if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + c.ConnectionPooler[role].Deployment = deployment + // actual synchronization + + var oldConnectionPooler *acidv1.ConnectionPooler + + if oldSpec != nil { + oldConnectionPooler = oldSpec.Spec.ConnectionPooler + } + + newConnectionPooler := newSpec.Spec.ConnectionPooler + // sync implementation below assumes that both old and new specs are + // not nil, but it can happen. To avoid any confusion like updating a + // deployment because the specification changed from nil to an empty + // struct (that was initialized somewhere before) replace any nil with + // an empty spec. + if oldConnectionPooler == nil { + oldConnectionPooler = &acidv1.ConnectionPooler{} + } + + if newConnectionPooler == nil { + newConnectionPooler = &acidv1.ConnectionPooler{} + } + + c.logger.Infof("old: %+v, new %+v", oldConnectionPooler, newConnectionPooler) + + var specSync bool + var specReason []string + + if oldSpec != nil { + specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) + } + + defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) + reason := append(specReason, defaultsReason...) + + if specSync || defaultsSync { + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) + } + + deployment, err := updateConnectionPoolerDeployment(c.KubeClient, + newDeploymentSpec) + + if err != nil { + return reason, err + } + c.ConnectionPooler[role].Deployment = deployment + } + } + + newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations)) + if newAnnotations != nil { + deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment + } + + service, err := c.KubeClient. + Services(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Service %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role]) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). 
+ Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Service = service + + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler[role].Service = service + } + + return NoSync, nil +} diff --git a/pkg/cluster/connection_pooler_new_test.go b/pkg/cluster/connection_pooler_new_test.go new file mode 100644 index 000000000..72b3408e3 --- /dev/null +++ b/pkg/cluster/connection_pooler_new_test.go @@ -0,0 +1,45 @@ +package cluster + +import ( + "testing" + + "context" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/labels" + + "k8s.io/client-go/kubernetes/fake" +) + +func TestFakeClient(t *testing.T) { + clientSet := fake.NewSimpleClientset() + namespace := "default" + + l := labels.Set(map[string]string{ + "application": "spilo", + }) + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment1", + Namespace: namespace, + Labels: l, + }, + } + + clientSet.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) + + deployment2, _ := clientSet.AppsV1().Deployments(namespace).Get(context.TODO(), "my-deployment1", metav1.GetOptions{}) + + if deployment.ObjectMeta.Name != deployment2.ObjectMeta.Name { + t.Errorf("Deployments are not equal") + } + + deployments, _ := clientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "application=spilo"}) + + if len(deployments.Items) != 1 { + t.Errorf("Label search does not work") + } +} diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go new file mode 100644 index 000000000..54be0f5bd --- /dev/null +++ b/pkg/cluster/connection_pooler_test.go @@ -0,0 +1,956 @@ +package cluster + +import ( + "errors" + "fmt" + "strings" + "testing" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { + return nil +} + +func boolToPointer(value bool) *bool { + return &value +} + +func int32ToPointer(value int32) *int32 { + return &value +} + +func TestConnectionPoolerCreationAndDeletion(t *testing.T) { + testName := "Test connection pooler creation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + }, + }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + + reason, err := 
cluster.createConnectionPooler(mockInstallLookupFunction) + + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, reason) + } + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil { + if cluster.ConnectionPooler[role].Deployment == nil { + t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) + } + + if cluster.ConnectionPooler[role].Service == nil { + t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) + } + } + } + oldSpec := &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + EnableReplicaConnectionPooler: boolToPointer(true), + }, + } + newSpec := &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + EnableReplicaConnectionPooler: boolToPointer(false), + }, + } + + // Delete connection pooler via sync + _, err = cluster.syncConnectionPooler(oldSpec, newSpec, mockInstallLookupFunction) + if err != nil { + t.Errorf("%s: Cannot sync connection pooler, %s", testName, err) + } + + for _, role := range [2]PostgresRole{Master, Replica} { + err = cluster.deleteConnectionPooler(role) + if err != nil { + t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + } + } +} + +func TestNeedConnectionPooler(t *testing.T) { + testName := "Test how connection pooler can be enabled" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with flag and full", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: nil, + } + + if needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is enabled with flag false and nil", + testName) + } + + // Test for replica connection pooler + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with full definition", + testName) + } + 
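An aside on the numbers these tests exercise: the pool sizes exported by getConnectionPoolerEnvVars earlier in this diff are derived by repeated integer halving of the per-instance connection budget. A worked example (editorial sketch; it uses the MaxDBConnections of 60 that TestConnectionPoolerPodSpec below configures, with an assumed two pooler instances rather than the default one):

    maxDBConn := int32(60) / 2   // CONNECTION_POOLER_MAX_DB_CONN = 30 per instance
    defaultSize := maxDBConn / 2 // CONNECTION_POOLER_DEFAULT_SIZE = 15
    minSize := defaultSize / 2   // CONNECTION_POOLER_MIN_SIZE = 7
    reserveSize := minSize       // CONNECTION_POOLER_RESERVE_SIZE = 7
    _ = reserveSize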
+ cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + } + + if !needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", + testName) + } +} + +func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && + (cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil || + *cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) { + return fmt.Errorf("Wrong number of instances") + } + } + return nil +} + +func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + for _, role := range []PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role].Deployment == nil { + return fmt.Errorf("Deployment was not saved %s", role) + } + + if cluster.ConnectionPooler[role].Service == nil { + return fmt.Errorf("Service was not saved %s", role) + } + } + + return nil +} + +func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler[Master].Deployment == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler[Master].Service == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + +func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + if cluster.ConnectionPooler[Replica].Deployment == nil { + return fmt.Errorf("Deployment was not saved") + } + + if cluster.ConnectionPooler[Replica].Service == nil { + return fmt.Errorf("Service was not saved") + } + + return nil +} + +func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && + (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { + return fmt.Errorf("Connection pooler was not deleted for role %v", role) + } + } + + return nil +} + +func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if cluster.ConnectionPooler[Master] != nil && + (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { + return fmt.Errorf("Connection pooler master was not deleted") + } + return nil +} + +func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if cluster.ConnectionPooler[Replica] != nil && + (cluster.ConnectionPooler[Replica].Deployment != nil || 
cluster.ConnectionPooler[Replica].Service != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") + } + return nil +} + +func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { + for _, msg := range reason { + if strings.HasPrefix(msg, "update [] from '' to '") { + return fmt.Errorf("There is an empty reason, %s", msg) + } + } + + return nil +} + +func TestConnectionPoolerSynchronization(t *testing.T) { + testName := "Test connection pooler synchronization" + newCluster := func(client k8sutil.KubernetesClient) *Cluster { + return New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + } + cluster := newCluster(k8sutil.KubernetesClient{}) + + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + + tests := []struct { + subTest string + oldSpec *acidv1.Postgresql + newSpec *acidv1.Postgresql + cluster *Cluster + defaultImage string + defaultInstances int32 + check func(cluster *Cluster, err error, reason SyncReason) error + }{ + { + subTest: "create from scratch", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: newCluster(k8sutil.ClientMissingObjects()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create if doesn't exist", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: newCluster(k8sutil.ClientMissingObjects()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: newCluster(k8sutil.ClientMissingObjects()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterobjectsAreSaved, + }, + { + subTest: "create no replica with flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "create replica if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: ReplicaobjectsAreSaved, + }, + { + subTest: "create both master and replica", + 
oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: newCluster(k8sutil.ClientMissingObjects()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreSaved, + }, + { + subTest: "delete only replica if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyReplicaDeleted, + }, + { + subTest: "delete only master if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyMasterDeleted, + }, + { + subTest: "delete if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "cleanup if still there", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "update deployment", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(1), + }, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(2), + }, + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: deploymentUpdated, + }, + { + subTest: "update deployment", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(1), + }, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{ + NumberOfInstances: int32ToPointer(2), + }, + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: deploymentUpdated, + }, + { + subTest: "update image from changed defaults", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:2.0", + defaultInstances: 2, + check: deploymentUpdated, + }, + { + subTest: "there is no sync 
from nil to an empty spec", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: nil, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: newCluster(k8sutil.NewMockKubernetesClient()), + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: noEmptySync, + }, + } + for _, tt := range tests { + tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage + tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances = + int32ToPointer(tt.defaultInstances) + + reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec, + tt.newSpec, mockInstallLookupFunction) + + if err := tt.check(tt.cluster, err, reason); err != nil { + t.Errorf("%s [%s]: Could not synchronize, %+v", + testName, tt.subTest, err) + } + } +} + +func TestConnectionPoolerPodSpec(t *testing.T) { + testName := "Test connection pooler pod template generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + MaxDBConnections: int32ToPointer(60), + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + var clusterNoDefaultRes = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{}, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + clusterNoDefaultRes.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + + noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "no default resources", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), + cluster: clusterNoDefaultRes, + check: noCheck, + }, + { + subTest: "default resources are set", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testResources, + }, + { + subTest: "labels for service", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testLabels, + 
}, + { + subTest: "required envs", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testEnvs, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) + + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } + + err = tt.check(cluster, podSpec, role) + if err != nil { + t.Errorf("%s [%s]: Pod spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} + +func TestConnectionPoolerDeploymentSpec(t *testing.T) { + testName := "Test connection pooler deployment spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: true, + Name: "", + Role: Master, + }, + } + + noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, deployment *appsv1.Deployment) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testDeploymentOwnerReference, + }, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testSelector, + }, + } + for _, tt := range tests { + deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master]) + + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } + + err = tt.check(cluster, deployment) + if err != nil { + t.Errorf("%s [%s]: Deployment spec is incorrect, %+v", + testName, tt.subTest, err) + } + } +} + +func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] + if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { + return fmt.Errorf("CPU request does not match, got %s, expected %s", + cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest) + } + + memReq := 
podSpec.Spec.Containers[0].Resources.Requests["memory"] + if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest { + return fmt.Errorf("Memory request does not match, got %s, expected %s", + memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest) + } + + cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] + if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit { + return fmt.Errorf("CPU limit does not match, got %s, expected %s", + cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit) + } + + memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] + if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit { + return fmt.Errorf("Memory limit does not match, got %s, expected %s", + memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit) + } + + return nil +} + +func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] + + if poolerLabels != cluster.connectionPoolerLabels(role, true).MatchLabels["connection-pooler"] { + return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(role, true).MatchLabels) + } + + return nil +} + +func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { + labels := deployment.Spec.Selector.MatchLabels + expected := cluster.connectionPoolerLabels(Master, true).MatchLabels + + if labels["connection-pooler"] != expected["connection-pooler"] { + return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", + labels, expected) + } + + return nil +} + +func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error { + selector := service.Spec.Selector + + if selector["connection-pooler"] != cluster.connectionPoolerName(role) { + return fmt.Errorf("Selector is incorrect, got %s, expected %s", + selector["connection-pooler"], cluster.connectionPoolerName(role)) + } + + return nil +} + +func TestConnectionPoolerServiceSpec(t *testing.T) { + testName := "Test connection pooler service spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Role: Master, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Role: Replica, + }, + } + + noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + cluster *Cluster + check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: 
&acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: testServiceOwnerReference, + }, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + cluster: cluster, + check: testServiceSelector, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role]) + + if err := tt.check(cluster, service, role); err != nil { + t.Errorf("%s [%s]: Service spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 75e2d2097..760b68d72 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -101,15 +101,20 @@ func (c *Cluster) databaseAccessDisabled() bool { } func (c *Cluster) initDbConn() error { - return c.initDbConnWithName("") -} - -func (c *Cluster) initDbConnWithName(dbname string) error { - c.setProcessName("initializing db connection") if c.pgDb != nil { return nil } + return c.initDbConnWithName("") +} + +// Worker function for connection initialization. This function does not check +// whether a connection is already open; if one is, it will be overwritten. +// Callers need to make sure no connection is open, otherwise we could leak +// connections. +func (c *Cluster) initDbConnWithName(dbname string) error { + c.setProcessName("initializing db connection") + var conn *sql.DB connstring := c.pgConnectionString(dbname) @@ -126,12 +131,12 @@ func (c *Cluster) initDbConnWithName(dbname string) error { } if _, ok := err.(*net.OpError); ok { - c.logger.Errorf("could not connect to PostgreSQL database: %v", err) + c.logger.Warningf("could not connect to Postgres database: %v", err) return false, nil } if err2 := conn.Close(); err2 != nil { - c.logger.Errorf("error when closing PostgreSQL connection after another error: %v", err) + c.logger.Errorf("error when closing Postgres connection after another error: %v", err) return false, err2 } @@ -145,6 +150,12 @@ func (c *Cluster) initDbConnWithName(dbname string) error { conn.SetMaxOpenConns(1) conn.SetMaxIdleConns(-1) + if c.pgDb != nil { + msg := "closing an existing connection before opening a new one to %s" + c.logger.Warningf(msg, dbname) + c.closeDbConn() + } + c.pgDb = conn return nil @@ -155,7 +166,7 @@ func (c *Cluster) connectionIsClosed() bool { } func (c *Cluster) closeDbConn() (err error) { - c.setProcessName("closing db connection") + c.setProcessName("closing database connection") if c.pgDb != nil { c.logger.Debug("closing database connection") if err = c.pgDb.Close(); err != nil { @@ -170,7 +181,7 @@ func (c *Cluster) closeDbConn() (err error) { } func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUserMap, err error) { - c.setProcessName("reading users from the db") + c.setProcessName("reading users from the database") var rows *sql.Rows users = make(spec.PgUserMap) if rows, err = c.pgDb.Query(getUserSQL, pq.Array(userNames)); err != nil { @@ -462,11 +473,14 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi } // Creates a connection pool credentials lookup function in every database to -// perform remote authentification. 
-func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { +// perform remote authentication. +func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error { var stmtBytes bytes.Buffer + c.logger.Info("Installing lookup function") + // Open a new connection if not yet done. This connection will be used only + // to get the list of databases, not for the actual installation. if err := c.initDbConn(); err != nil { return fmt.Errorf("could not init database connection") } @@ -480,36 +494,40 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { } }() + // List of databases we failed to process. At the moment it functions just + // like a flag to retry on the next sync, but in the future we may want to + // retry only the necessary parts, so let's keep the list. + failedDatabases := []string{} currentDatabases, err := c.getDatabases() if err != nil { msg := "could not get databases to install pooler lookup function: %v" return fmt.Errorf(msg, err) } + // We've got the list of target databases, now close this connection to + // open a new one to each of them. + if err := c.closeDbConn(); err != nil { c.logger.Errorf("could not close database connection: %v", err) } + templater := template.Must(template.New("sql").Parse(connectionPoolerLookup)) + params := TemplateParams{ + "pooler_schema": poolerSchema, + "pooler_user": poolerUser, + } + + if err := templater.Execute(&stmtBytes, params); err != nil { + msg := "could not prepare sql statement %+v: %v" + return fmt.Errorf(msg, params, err) + } for dbname := range currentDatabases { + if dbname == "template0" || dbname == "template1" { continue } - if err := c.initDbConnWithName(dbname); err != nil { - return fmt.Errorf("could not init database connection to %s", dbname) - } - - c.logger.Infof("Install pooler lookup function into %s", dbname) - - params := TemplateParams{ - "pooler_schema": poolerSchema, - "pooler_user": poolerUser, - } - - if err := templater.Execute(&stmtBytes, params); err != nil { - c.logger.Errorf("could not prepare sql statement %+v: %v", - params, err) - // process other databases - continue - } + c.logger.Infof("install pooler lookup function into database '%s'", dbname) // golang sql will do retries couple of times if pq driver reports // connections issues (driver.ErrBadConn), but since our query is @@ -520,7 +538,20 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { constants.PostgresConnectTimeout, constants.PostgresConnectRetryTimeout, func() (bool, error) { - if _, err := c.pgDb.Exec(stmtBytes.String()); err != nil { + + // At this moment we are not connected to any database + if err := c.initDbConnWithName(dbname); err != nil { + msg := "could not init database connection to %s" + return false, fmt.Errorf(msg, dbname) + } + defer func() { + if err := c.closeDbConn(); err != nil { + msg := "could not close database connection: %v" + c.logger.Errorf(msg, err) + } + }() + + if _, err = c.pgDb.Exec(stmtBytes.String()); err != nil { msg := fmt.Errorf("could not execute sql statement %s: %v", stmtBytes.String(), err) return false, msg @@ -533,15 +564,15 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { c.logger.Errorf("could not execute after retries %s: %v", stmtBytes.String(), err) // process other databases + failedDatabases = append(failedDatabases, dbname) continue } - c.logger.Infof("pooler lookup function installed into %s", dbname) - - if err := 
c.closeDbConn(); err != nil { - c.logger.Errorf("could not close database connection: %v", err) - } } - c.ConnectionPooler.LookupFunction = true + if len(failedDatabases) == 0 { + c.ConnectionPooler[role].LookupFunction = true + } + return nil } diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 534ae7b8e..6b47b37f6 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -7,6 +7,7 @@ import ( "path" "sort" "strconv" + "strings" "github.com/sirupsen/logrus" @@ -20,7 +21,6 @@ import ( acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" - pkgspec "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -75,10 +75,6 @@ func (c *Cluster) statefulSetName() string { return c.Name } -func (c *Cluster) connectionPoolerName() string { - return c.Name + "-pooler" -} - func (c *Cluster) endpointName(role PostgresRole) string { name := c.Name if role == Replica { @@ -142,26 +138,6 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources { } } -// Generate default resource section for connection pooler deployment, to be -// used if nothing custom is specified in the manifest -func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources { - config := c.OpConfig - - defaultRequests := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, - } - defaultLimits := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, - } - - return acidv1.Resources{ - ResourceRequests: defaultRequests, - ResourceLimits: defaultLimits, - } -} - func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) { var err error @@ -213,7 +189,7 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD return requests, nil } -func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, logger *logrus.Entry) (string, error) { +func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) { config := spiloConfiguration{} config.Bootstrap = pgBootstrap{} @@ -294,7 +270,14 @@ PatroniInitDBParams: } config.PgLocalConfiguration = make(map[string]interface{}) - config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion) + + // the newer and preferred way to specify the PG version is to use the `PGVERSION` env variable + // setting postgresql.bin_dir in the SPILO_CONFIGURATION still works and takes precedence over PGVERSION + // so we add postgresql.bin_dir only if PGVERSION is unused + // see PR 222 in Spilo + if !EnablePgVersionEnvVar { + config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion) + } if len(pg.Parameters) > 0 { local, bootstrap := getLocalAndBoostrapPostgreSQLParameters(pg.Parameters) @@ -337,25 +320,39 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca return } -func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity { - matchExpressions := 
make([]v1.NodeSelectorRequirement, 0) - if len(nodeReadinessLabel) == 0 { +func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity { + if len(nodeReadinessLabel) == 0 && nodeAffinity == nil { return nil } - for k, v := range nodeReadinessLabel { - matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ - Key: k, - Operator: v1.NodeSelectorOpIn, - Values: []string{v}, - }) + nodeAffinityCopy := v1.NodeAffinity{} + if nodeAffinity != nil { + nodeAffinityCopy = *nodeAffinity.DeepCopy() + } + if len(nodeReadinessLabel) > 0 { + matchExpressions := make([]v1.NodeSelectorRequirement, 0) + for k, v := range nodeReadinessLabel { + matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ + Key: k, + Operator: v1.NodeSelectorOpIn, + Values: []string{v}, + }) + } + nodeReadinessSelectorTerm := v1.NodeSelectorTerm{MatchExpressions: matchExpressions} + if nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution == nil { + nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + nodeReadinessSelectorTerm, + }, + } + } else { + nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ + NodeSelectorTerms: append(nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, nodeReadinessSelectorTerm), + } + } } return &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: matchExpressions}}, - }, - }, + NodeAffinity: &nodeAffinityCopy, } } @@ -531,7 +528,7 @@ func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, su }, }, } - mergedEnv := append(container.Env, env...) + mergedEnv := append(env, container.Env...) container.Env = deduplicateEnvVars(mergedEnv, container.Name, logger) result = append(result, container) } @@ -557,8 +554,11 @@ func (c *Cluster) generatePodTemplate( initContainers []v1.Container, sidecarContainers []v1.Container, tolerationsSpec *[]v1.Toleration, + spiloRunAsUser *int64, + spiloRunAsGroup *int64, spiloFSGroup *int64, nodeAffinity *v1.Affinity, + schedulerName *string, terminateGracePeriod int64, podServiceAccountName string, kubeIAMRole string, @@ -576,6 +576,14 @@ func (c *Cluster) generatePodTemplate( containers = append(containers, sidecarContainers...) 
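A note on the `mergedEnv` reordering in patchSidecarContainers above: deduplicateEnvVars keeps only the first occurrence of each variable name, so switching to `append(env, container.Env...)` means the operator-injected variables now take precedence over the ones a sidecar declares itself. A minimal sketch of that first-occurrence-wins behavior; the variable name and values are illustrative, and the snippet assumes it runs inside the cluster package where deduplicateEnvVars and a logger are available:

```go
// Illustrative: with the new append order the injected definition comes first,
// so deduplicateEnvVars keeps it and drops the sidecar's own value.
injected := []v1.EnvVar{{Name: "POD_NAMESPACE", Value: "operator-injected"}}
sidecarEnv := []v1.EnvVar{{Name: "POD_NAMESPACE", Value: "sidecar-defined"}}

mergedEnv := append(injected, sidecarEnv...)
deduped := deduplicateEnvVars(mergedEnv, "my-sidecar", logger)
// deduped now holds a single POD_NAMESPACE entry with Value "operator-injected";
// the duplicate is logged (as info for WAL_/LOG_ names, as a warning otherwise).
_ = deduped
```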
securityContext := v1.PodSecurityContext{} + if spiloRunAsUser != nil { + securityContext.RunAsUser = spiloRunAsUser + } + + if spiloRunAsGroup != nil { + securityContext.RunAsGroup = spiloRunAsGroup + } + if spiloFSGroup != nil { securityContext.FSGroup = spiloFSGroup } @@ -589,6 +597,10 @@ func (c *Cluster) generatePodTemplate( SecurityContext: &securityContext, } + if schedulerName != nil { + podSpec.SchedulerName = *schedulerName + } + if shmVolume != nil && *shmVolume { addShmVolume(&podSpec) } @@ -705,6 +717,9 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri Value: c.OpConfig.PamRoleName, }, } + if c.OpConfig.EnablePgVersionEnvVar { + envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.Spec.PgVersion}) + } // Spilo expects cluster labels as JSON if clusterLabels, err := json.Marshal(labels.Set(c.OpConfig.ClusterLabels)); err != nil { envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_LABELS", Value: labels.Set(c.OpConfig.ClusterLabels).String()}) @@ -714,17 +729,6 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri if spiloConfiguration != "" { envVars = append(envVars, v1.EnvVar{Name: "SPILO_CONFIGURATION", Value: spiloConfiguration}) } - if c.OpConfig.WALES3Bucket != "" { - envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) - envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) - } - - if c.OpConfig.LogS3Bucket != "" { - envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) - envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) - } if c.patroniUsesKubernetes() { envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"}) @@ -736,7 +740,7 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"}) } - if cloneDescription.ClusterName != "" { + if cloneDescription != nil && cloneDescription.ClusterName != "" { envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...) } @@ -744,10 +748,34 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...) } + // add vars taken from pod_environment_configmap and pod_environment_secret first + // (to allow them to override the globals set in the operator config) if len(customPodEnvVarsList) > 0 { envVars = append(envVars, customPodEnvVarsList...) 
} + if c.OpConfig.WALES3Bucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + } + + if c.OpConfig.WALGSBucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + } + + if c.OpConfig.GCPCredentials != "" { + envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials}) + } + + if c.OpConfig.LogS3Bucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) + envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) + } + return envVars } @@ -766,13 +794,81 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus. result = append(result, input[i]) } else if names[va.Name] == 1 { names[va.Name]++ + + // Some variables (those that configure the WAL_ and LOG_ shipping) may legitimately be overwritten; log this only as info + if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") { + logger.Infof("global variable %q has been overwritten by configmap/secret for container %q", + va.Name, containerName) + } else { + logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", + va.Name, containerName) + } - logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", - va.Name, containerName) } } return result } +// Return the list of variables the pod received from the configured ConfigMap +func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) { + configMapPodEnvVarsList := make([]v1.EnvVar, 0) + + if c.OpConfig.PodEnvironmentConfigMap.Name == "" { + return configMapPodEnvVarsList, nil + } + + cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentConfigMap.Name, + metav1.GetOptions{}) + if err != nil { + // if not found, try again using the cluster's namespace if it's different (old behavior) + if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace { + cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentConfigMap.Name, + metav1.GetOptions{}) + } + if err != nil { + return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) + } + } + for k, v := range cm.Data { + configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) + } + return configMapPodEnvVarsList, nil +} + +// Return the list of variables the pod received from the configured Secret +func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) { + secretPodEnvVarsList := make([]v1.EnvVar, 0) + + if c.OpConfig.PodEnvironmentSecret == "" { + return secretPodEnvVarsList, nil + } + + secret, err := c.KubeClient.Secrets(c.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentSecret, + metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not read Secret 
PodEnvironmentSecretName: %v", err) + } + + for k := range secret.Data { + secretPodEnvVarsList = append(secretPodEnvVarsList, + v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.OpConfig.PodEnvironmentSecret, + }, + Key: k, + }, + }}) + } + + return secretPodEnvVarsList, nil +} + func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container { name := sidecar.Name if name == "" { @@ -818,41 +914,6 @@ func extractPgVersionFromBinPath(binPath string, template string) (string, error return fmt.Sprintf("%v", pgVersion), nil } -func (c *Cluster) getNewPgVersion(container v1.Container, newPgVersion string) (string, error) { - var ( - spiloConfiguration spiloConfiguration - runningPgVersion string - err error - ) - - for _, env := range container.Env { - if env.Name != "SPILO_CONFIGURATION" { - continue - } - err = json.Unmarshal([]byte(env.Value), &spiloConfiguration) - if err != nil { - return newPgVersion, err - } - } - - if len(spiloConfiguration.PgLocalConfiguration) > 0 { - currentBinPath := fmt.Sprintf("%v", spiloConfiguration.PgLocalConfiguration[patroniPGBinariesParameterName]) - runningPgVersion, err = extractPgVersionFromBinPath(currentBinPath, pgBinariesLocationTemplate) - if err != nil { - return "", fmt.Errorf("could not extract Postgres version from %v in SPILO_CONFIGURATION", currentBinPath) - } - } else { - return "", fmt.Errorf("could not find %q setting in SPILO_CONFIGURATION", patroniPGBinariesParameterName) - } - - if runningPgVersion != newPgVersion { - c.logger.Warningf("postgresql version change(%q -> %q) has no effect", runningPgVersion, newPgVersion) - newPgVersion = runningPgVersion - } - - return newPgVersion, nil -} - func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) { var ( @@ -932,32 +993,34 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef initContainers = spec.InitContainers } - customPodEnvVarsList := make([]v1.EnvVar, 0) - - if c.OpConfig.PodEnvironmentConfigMap != (pkgspec.NamespacedName{}) { - var cm *v1.ConfigMap - cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get( - context.TODO(), - c.OpConfig.PodEnvironmentConfigMap.Name, - metav1.GetOptions{}) - if err != nil { - // if not found, try again using the cluster's namespace if it's different (old behavior) - if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace { - cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get( - context.TODO(), - c.OpConfig.PodEnvironmentConfigMap.Name, - metav1.GetOptions{}) - } - if err != nil { - return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) - } - } - for k, v := range cm.Data { - customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) - } - sort.Slice(customPodEnvVarsList, - func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) + spiloCompathWalPathList := make([]v1.EnvVar, 0) + if c.OpConfig.EnableSpiloWalPathCompat { + spiloCompathWalPathList = append(spiloCompathWalPathList, + v1.EnvVar{ + Name: "ENABLE_WAL_PATH_COMPAT", + Value: "true", + }, + ) } + + // fetch env vars from custom ConfigMap + configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables() + if err != nil { + return nil, err + } + + // fetch env vars from custom Secret + secretEnvVarsList, err := 
c.getPodEnvironmentSecretVariables() + if err != nil { + return nil, err + } + + // concat all custom pod env vars and sort them + customPodEnvVarsList := append(spiloCompathWalPathList, configMapEnvVarsList...) + customPodEnvVarsList = append(customPodEnvVarsList, secretEnvVarsList...) + sort.Slice(customPodEnvVarsList, + func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) + if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" { return nil, fmt.Errorf("s3_wal_path is empty for standby cluster") } @@ -984,7 +1047,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef } } - spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger) + spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.OpConfig.EnablePgVersionEnvVar, c.logger) if err != nil { return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err) } @@ -993,7 +1056,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef spiloEnvVars := c.generateSpiloPodEnvVars( c.Postgresql.GetUID(), spiloConfiguration, - &spec.Clone, + spec.Clone, spec.StandbyCluster, customPodEnvVarsList, ) @@ -1001,7 +1064,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef // pickup the docker image for the spilo container effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage) - // determine the FSGroup for the spilo pod + // determine the User, Group and FSGroup for the spilo pod + effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser + if spec.SpiloRunAsUser != nil { + effectiveRunAsUser = spec.SpiloRunAsUser + } + + effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup + if spec.SpiloRunAsGroup != nil { + effectiveRunAsGroup = spec.SpiloRunAsGroup + } + effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup if spec.SpiloFSGroup != nil { effectiveFSGroup = spec.SpiloFSGroup @@ -1065,7 +1138,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef } // generate the spilo container - c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars) + c.logger.Debugf("Generating Spilo container, environment variables") + c.logger.Debugf("%v", spiloEnvVars) + spiloContainer := generateContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, @@ -1134,19 +1209,22 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) - annotations := c.generatePodAnnotations(spec) + podAnnotations := c.generatePodAnnotations(spec) // generate pod template for the statefulset, based on the spilo container and sidecars podTemplate, err = c.generatePodTemplate( c.Namespace, c.labelsSet(true), - annotations, + c.annotationsSet(podAnnotations), spiloContainer, initContainers, sidecarContainers, &tolerationSpec, + effectiveRunAsUser, + effectiveRunAsGroup, effectiveFSGroup, - nodeAffinity(c.OpConfig.NodeReadinessLabel), + nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity), + spec.SchedulerName, int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, c.OpConfig.KubeIAMRole, @@ -1183,15 +1261,16 @@ func (c *Cluster) generateStatefulSet(spec 
*acidv1.PostgresSpec) (*appsv1.Statef return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy) } - annotations = make(map[string]string) - annotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false) + stsAnnotations := make(map[string]string) + stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false) + stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil)) statefulSet := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: c.statefulSetName(), Namespace: c.Namespace, Labels: c.labelsSet(true), - Annotations: c.AnnotationsToPropagate(annotations), + Annotations: stsAnnotations, }, Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, @@ -1484,9 +1563,10 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) username := pgUser.Name secret := v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: c.credentialSecretName(username), - Namespace: namespace, - Labels: c.labelsSet(true), + Name: c.credentialSecretName(username), + Namespace: namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Type: v1.SecretTypeOpaque, Data: map[string][]byte{ @@ -1547,6 +1627,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) } c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges) + serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy) serviceSpec.Type = v1.ServiceTypeLoadBalancer } else if role == Replica { // before PR #258, the replica service was only created if allocated a LB @@ -1559,7 +1640,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) Name: c.serviceName(role), Namespace: c.Namespace, Labels: c.roleLabelsSet(true, role), - Annotations: c.generateServiceAnnotations(role, spec), + Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)), }, Spec: serviceSpec, } @@ -1657,11 +1738,31 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) msg := "Figure out which S3 bucket to use from env" c.logger.Info(msg, description.S3WalPath) + if c.OpConfig.WALES3Bucket != "" { + envs := []v1.EnvVar{ + { + Name: "CLONE_WAL_S3_BUCKET", + Value: c.OpConfig.WALES3Bucket, + }, + } + result = append(result, envs...) + } else if c.OpConfig.WALGSBucket != "" { + envs := []v1.EnvVar{ + { + Name: "CLONE_WAL_GS_BUCKET", + Value: c.OpConfig.WALGSBucket, + }, + { + Name: "CLONE_GOOGLE_APPLICATION_CREDENTIALS", + Value: c.OpConfig.GCPCredentials, + }, + } + result = append(result, envs...) + } else { + c.logger.Error("Cannot figure out S3 or GS bucket. 
Both are empty.") + } + envs := []v1.EnvVar{ - { - Name: "CLONE_WAL_S3_BUCKET", - Value: c.OpConfig.WALES3Bucket, - }, { Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID), @@ -1742,9 +1843,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget return &policybeta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: c.podDisruptionBudgetName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.podDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: policybeta1.PodDisruptionBudgetSpec{ MinAvailable: &minAvailable, @@ -1824,7 +1926,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { []v1.Container{}, &[]v1.Toleration{}, nil, - nodeAffinity(c.OpConfig.NodeReadinessLabel), + nil, + nil, + nodeAffinity(c.OpConfig.NodeReadinessLabel, nil), + nil, int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, c.OpConfig.KubeIAMRole, @@ -1861,9 +1966,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { cronJob := &batchv1beta1.CronJob{ ObjectMeta: metav1.ObjectMeta{ - Name: c.getLogicalBackupJobName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.getLogicalBackupJobName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: batchv1beta1.CronJobSpec{ Schedule: schedule, @@ -1896,6 +2002,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, }, // Bucket env vars + { + Name: "LOGICAL_BACKUP_PROVIDER", + Value: c.OpConfig.LogicalBackup.LogicalBackupProvider, + }, { Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, @@ -1916,6 +2026,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())), }, + { + Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", + Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials, + }, // Postgres env vars { Name: "PG_VERSION", @@ -1958,7 +2072,8 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey}) } - c.logger.Debugf("Generated logical backup env vars %v", envVars) + c.logger.Debugf("Generated logical backup env vars") + c.logger.Debugf("%v", envVars) return envVars } @@ -1967,179 +2082,6 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) { return "logical-backup-" + c.clusterName().Name } -// Generate pool size related environment variables. -// -// MAX_DB_CONN would specify the global maximum for connections to a target -// database. -// -// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. -// -// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when -// most of the queries coming through a connection pooler are from the same -// user to the same db). In case if we want to spin up more connection pooler -// instances, take this into account and maintain the same number of -// connections. -// -// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload -// have to wait for spinning up a new connections. -// -// RESERVE_SIZE is how many additional connections to allow for a pooler. 
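The sizing rules described in the comment block above reduce to simple integer arithmetic in the function that follows: the connection budget is split across pooler instances, halved for the per-db/user default, and halved again for the minimum. A worked sketch under assumed inputs (the max_db_connections value of 60 mirrors the deleted pod-spec test above; two instances is hypothetical, since the operator defaults to a different instance count):

```go
// Illustrative only: the sizing arithmetic of getConnectionPoolerEnvVars,
// using integer division exactly as the Go code below does.
effectiveMaxDBConn := int32(60) // assumed MAX_DB_CONN budget (as in the old test)
numberOfInstances := int32(2)   // hypothetical pooler replica count

maxDBConn := effectiveMaxDBConn / numberOfInstances // 30 => CONNECTION_POOLER_MAX_DB_CONN
defaultSize := maxDBConn / 2                        // 15 => CONNECTION_POOLER_DEFAULT_SIZE
minSize := defaultSize / 2                          // 7  => CONNECTION_POOLER_MIN_SIZE
reserveSize := minSize                              // 7  => CONNECTION_POOLER_RESERVE_SIZE
_ = reserveSize                                     // silence the unused check in this sketch
```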
-func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar { - effectiveMode := util.Coalesce( - spec.ConnectionPooler.Mode, - c.OpConfig.ConnectionPooler.Mode) - - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - effectiveMaxDBConn := util.CoalesceInt32( - spec.ConnectionPooler.MaxDBConnections, - c.OpConfig.ConnectionPooler.MaxDBConnections) - - if effectiveMaxDBConn == nil { - effectiveMaxDBConn = k8sutil.Int32ToPointer( - constants.ConnectionPoolerMaxDBConnections) - } - - maxDBConn := *effectiveMaxDBConn / *numberOfInstances - - defaultSize := maxDBConn / 2 - minSize := defaultSize / 2 - reserveSize := minSize - - return []v1.EnvVar{ - { - Name: "CONNECTION_POOLER_PORT", - Value: fmt.Sprint(pgPort), - }, - { - Name: "CONNECTION_POOLER_MODE", - Value: effectiveMode, - }, - { - Name: "CONNECTION_POOLER_DEFAULT_SIZE", - Value: fmt.Sprint(defaultSize), - }, - { - Name: "CONNECTION_POOLER_MIN_SIZE", - Value: fmt.Sprint(minSize), - }, - { - Name: "CONNECTION_POOLER_RESERVE_SIZE", - Value: fmt.Sprint(reserveSize), - }, - { - Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", - Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), - }, - { - Name: "CONNECTION_POOLER_MAX_DB_CONN", - Value: fmt.Sprint(maxDBConn), - }, - } -} - -func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) ( - *v1.PodTemplateSpec, error) { - - gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) - resources, err := generateResourceRequirements( - spec.ConnectionPooler.Resources, - c.makeDefaultConnectionPoolerResources()) - - effectiveDockerImage := util.Coalesce( - spec.ConnectionPooler.DockerImage, - c.OpConfig.ConnectionPooler.Image) - - effectiveSchema := util.Coalesce( - spec.ConnectionPooler.Schema, - c.OpConfig.ConnectionPooler.Schema) - - if err != nil { - return nil, fmt.Errorf("could not generate resource requirements: %v", err) - } - - secretSelector := func(key string) *v1.SecretKeySelector { - effectiveUser := util.Coalesce( - spec.ConnectionPooler.User, - c.OpConfig.ConnectionPooler.User) - - return &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: c.credentialSecretName(effectiveUser), - }, - Key: key, - } - } - - envVars := []v1.EnvVar{ - { - Name: "PGHOST", - Value: c.serviceAddress(Master), - }, - { - Name: "PGPORT", - Value: c.servicePort(Master), - }, - { - Name: "PGUSER", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("username"), - }, - }, - // the convention is to use the same schema name as - // connection pooler username - { - Name: "PGSCHEMA", - Value: effectiveSchema, - }, - { - Name: "PGPASSWORD", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: secretSelector("password"), - }, - }, - } - - envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...) 
- - poolerContainer := v1.Container{ - Name: connectionPoolerContainer, - Image: effectiveDockerImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resources, - Ports: []v1.ContainerPort{ - { - ContainerPort: pgPort, - Protocol: v1.ProtocolTCP, - }, - }, - Env: envVars, - } - - podTemplate := &v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: c.connectionPoolerLabelsSelector().MatchLabels, - Namespace: c.Namespace, - Annotations: c.generatePodAnnotations(spec), - }, - Spec: v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, - TerminationGracePeriodSeconds: &gracePeriod, - Containers: []v1.Container{poolerContainer}, - // TODO: add tolerations to scheduler pooler on the same node - // as database - //Tolerations: *tolerationsSpec, - }, - } - - return podTemplate, nil -} - // Return an array of ownerReferences to make an arbitraty object dependent on // the StatefulSet. Dependency is made on StatefulSet instead of PostgreSQL CRD // while the former is represent the actual state, and only it's deletion means @@ -2165,108 +2107,6 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference { } } -func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) ( - *appsv1.Deployment, error) { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - podTemplate, err := c.generateConnectionPoolerPodTemplate(spec) - numberOfInstances := spec.ConnectionPooler.NumberOfInstances - if numberOfInstances == nil { - numberOfInstances = util.CoalesceInt32( - c.OpConfig.ConnectionPooler.NumberOfInstances, - k8sutil.Int32ToPointer(1)) - } - - if *numberOfInstances < constants.ConnectionPoolerMinInstances { - msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) - - *numberOfInstances = constants.ConnectionPoolerMinInstances - } - - if err != nil { - return nil, err - } - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this deployment, but there is a hope that this object - // will be garbage collected if something went wrong and operator - // didn't deleted it. - OwnerReferences: c.ownerReferences(), - }, - Spec: appsv1.DeploymentSpec{ - Replicas: numberOfInstances, - Selector: c.connectionPoolerLabelsSelector(), - Template: *podTemplate, - }, - } - - return deployment, nil -} - -func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service { - - // there are two ways to enable connection pooler, either to specify a - // connectionPooler section or enableConnectionPooler. 
In the second case - // spec.connectionPooler will be nil, so to make it easier to calculate - // default values, initialize it to an empty structure. It could be done - // anywhere, but here is the earliest common entry point between sync and - // create code, so init here. - if spec.ConnectionPooler == nil { - spec.ConnectionPooler = &acidv1.ConnectionPooler{} - } - - serviceSpec := v1.ServiceSpec{ - Ports: []v1.ServicePort{ - { - Name: c.connectionPoolerName(), - Port: pgPort, - TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)}, - }, - }, - Type: v1.ServiceTypeClusterIP, - Selector: map[string]string{ - "connection-pooler": c.connectionPoolerName(), - }, - } - - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.connectionPoolerName(), - Namespace: c.Namespace, - Labels: c.connectionPoolerLabelsSelector().MatchLabels, - Annotations: map[string]string{}, - // make StatefulSet object its owner to represent the dependency. - // By itself StatefulSet is being deleted with "Orphaned" - // propagation policy, which means that it's deletion will not - // clean up this service, but there is a hope that this object will - // be garbage collected if something went wrong and operator didn't - // deleted it. - OwnerReferences: c.ownerReferences(), - }, - Spec: serviceSpec, - } - - return service -} - func ensurePath(file string, defaultDir string, defaultFile string) string { if file == "" { return path.Join(defaultDir, defaultFile) diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index d09a2c0aa..e880fcc3b 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1,15 +1,17 @@ package cluster import ( - "errors" + "context" "fmt" "reflect" + "sort" "testing" "github.com/stretchr/testify/assert" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -20,9 +22,18 @@ import ( policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" ) +// For testing purposes +type ExpectedValue struct { + envIndex int + envVarConstant string + envVarValue string +} + func toIntStr(val int) *intstr.IntOrString { b := intstr.FromInt(val) return &b @@ -82,7 +93,7 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { } for _, tt := range tests { cluster.OpConfig = tt.opConfig - result, err := generateSpiloJSONConfiguration(tt.pgParam, tt.patroni, tt.role, logger) + result, err := generateSpiloJSONConfiguration(tt.pgParam, tt.patroni, tt.role, false, logger) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -93,6 +104,119 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { } } +func TestGenerateSpiloPodEnvVars(t *testing.T) { + var cluster = New( + Config{ + OpConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + expectedValuesGSBucket := []ExpectedValue{ + ExpectedValue{ + envIndex: 15, + envVarConstant: "WAL_GS_BUCKET", + envVarValue: "wale-gs-bucket", + }, + ExpectedValue{ + envIndex: 16, + 
envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: "/SomeUUID", + }, + ExpectedValue{ + envIndex: 17, + envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", + envVarValue: "", + }, + } + + expectedValuesGCPCreds := []ExpectedValue{ + ExpectedValue{ + envIndex: 15, + envVarConstant: "WAL_GS_BUCKET", + envVarValue: "wale-gs-bucket", + }, + ExpectedValue{ + envIndex: 16, + envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: "/SomeUUID", + }, + ExpectedValue{ + envIndex: 17, + envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", + envVarValue: "", + }, + ExpectedValue{ + envIndex: 18, + envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS", + envVarValue: "some_path_to_credentials", + }, + } + + testName := "TestGenerateSpiloPodEnvVars" + tests := []struct { + subTest string + opConfig config.Config + uid types.UID + spiloConfig string + cloneDescription *acidv1.CloneDescription + standbyDescription *acidv1.StandbyDescription + customEnvList []v1.EnvVar + expectedValues []ExpectedValue + }{ + { + subTest: "Will set WAL_GS_BUCKET env", + opConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + }, + uid: "SomeUUID", + spiloConfig: "someConfig", + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + customEnvList: []v1.EnvVar{}, + expectedValues: expectedValuesGSBucket, + }, + { + subTest: "Will set GOOGLE_APPLICATION_CREDENTIALS env", + opConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + GCPCredentials: "some_path_to_credentials", + }, + uid: "SomeUUID", + spiloConfig: "someConfig", + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + customEnvList: []v1.EnvVar{}, + expectedValues: expectedValuesGCPCreds, + }, + } + + for _, tt := range tests { + cluster.OpConfig = tt.opConfig + + actualEnvs := cluster.generateSpiloPodEnvVars(tt.uid, tt.spiloConfig, tt.cloneDescription, tt.standbyDescription, tt.customEnvList) + + for _, ev := range tt.expectedValues { + env := actualEnvs[ev.envIndex] + + if env.Name != ev.envVarConstant { + t.Errorf("%s %s: Expected env name %s, have %s instead", + testName, tt.subTest, ev.envVarConstant, env.Name) + } + + if env.Value != ev.envVarValue { + t.Errorf("%s %s: Expected env value %s, have %s instead", + testName, tt.subTest, ev.envVarValue, env.Value) + } + } + } +} + func TestCreateLoadBalancerLogic(t *testing.T) { var cluster = New( Config{ @@ -432,95 +556,6 @@ func TestExtractPgVersionFromBinPath(t *testing.T) { } } -func TestGetPgVersion(t *testing.T) { - testName := "TestGetPgVersion" - tests := []struct { - subTest string - pgContainer v1.Container - currentPgVersion string - newPgVersion string - }{ - { - subTest: "new version with decimal point differs from current SPILO_CONFIGURATION", - pgContainer: v1.Container{ - Name: "postgres", - Env: []v1.EnvVar{ - { - Name: "SPILO_CONFIGURATION", - Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/9.6/bin\"}}", - }, - }, - }, - currentPgVersion: "9.6", - newPgVersion: "12", - }, - { - subTest: "new version differs from current SPILO_CONFIGURATION", - pgContainer: v1.Container{ - Name: "postgres", - Env: []v1.EnvVar{ - { - Name: "SPILO_CONFIGURATION", - Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/11/bin\"}}", - }, - }, - }, - currentPgVersion: "11", - newPgVersion: "12", - }, - { - subTest: "new version is lower than the one found in current SPILO_CONFIGURATION", - pgContainer: v1.Container{ - Name: "postgres", - Env: []v1.EnvVar{ - { - Name: "SPILO_CONFIGURATION", - Value: "{\"postgresql\": 
{\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}", - }, - }, - }, - currentPgVersion: "12", - newPgVersion: "11", - }, - { - subTest: "new version is the same like in the current SPILO_CONFIGURATION", - pgContainer: v1.Container{ - Name: "postgres", - Env: []v1.EnvVar{ - { - Name: "SPILO_CONFIGURATION", - Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}", - }, - }, - }, - currentPgVersion: "12", - newPgVersion: "12", - }, - } - - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - for _, tt := range tests { - pgVersion, err := cluster.getNewPgVersion(tt.pgContainer, tt.newPgVersion) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if pgVersion != tt.currentPgVersion { - t.Errorf("%s %s: Expected version %s, have %s instead", - testName, tt.subTest, tt.currentPgVersion, pgVersion) - } - } -} - func TestSecretVolume(t *testing.T) { testName := "TestSecretVolume" tests := []struct { @@ -592,46 +627,219 @@ func TestSecretVolume(t *testing.T) { } } -func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { - cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] - if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { - return fmt.Errorf("CPU request doesn't match, got %s, expected %s", - cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest) - } +const ( + testPodEnvironmentConfigMapName = "pod_env_cm" + testPodEnvironmentSecretName = "pod_env_sc" +) - memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"] - if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest { - return fmt.Errorf("Memory request doesn't match, got %s, expected %s", - memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest) - } - - cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] - if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit { - return fmt.Errorf("CPU limit doesn't match, got %s, expected %s", - cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit) - } - - memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] - if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit { - return fmt.Errorf("Memory limit doesn't match, got %s, expected %s", - memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit) - } - - return nil +type mockSecret struct { + v1core.SecretInterface } -func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { - poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] - - if poolerLabels != cluster.connectionPoolerLabelsSelector().MatchLabels["connection-pooler"] { - return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", - podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector().MatchLabels) - } - - return nil +type mockConfigMap struct { + v1core.ConfigMapInterface } -func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { +func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { + if name != testPodEnvironmentSecretName { + return nil, fmt.Errorf("Secret PodEnvironmentSecret not found") + } + secret := &v1.Secret{} 
+ secret.Name = testPodEnvironmentSecretName + secret.Data = map[string][]byte{ + "minio_access_key": []byte("alpha"), + "minio_secret_key": []byte("beta"), + } + return secret, nil +} + +func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { + if name != testPodEnvironmentConfigMapName { + return nil, fmt.Errorf("NotFound") + } + configmap := &v1.ConfigMap{} + configmap.Name = testPodEnvironmentConfigMapName + configmap.Data = map[string]string{ + "foo1": "bar1", + "foo2": "bar2", + } + return configmap, nil +} + +type MockSecretGetter struct { +} + +type MockConfigMapsGetter struct { +} + +func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { + return &mockSecret{} +} + +func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { + return &mockConfigMap{} +} + +func newMockKubernetesClient() k8sutil.KubernetesClient { + return k8sutil.KubernetesClient{ + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + } +} +func newMockCluster(opConfig config.Config) *Cluster { + cluster := &Cluster{ + Config: Config{OpConfig: opConfig}, + KubeClient: newMockKubernetesClient(), + } + return cluster +} + +func TestPodEnvironmentConfigMapVariables(t *testing.T) { + testName := "TestPodEnvironmentConfigMapVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "no PodEnvironmentConfigMap", + envVars: []v1.EnvVar{}, + }, + { + subTest: "missing PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: "idonotexist", + }, + }, + }, + err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"), + }, + { + subTest: "simple PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + }, + envVars: []v1.EnvVar{ + { + Name: "foo1", + Value: "bar1", + }, + { + Name: "foo2", + Value: "bar2", + }, + }, + }, + } + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentConfigMapVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } +} + +// Test if the keys of an existing secret are properly referenced +func TestPodEnvironmentSecretVariables(t *testing.T) { + testName := "TestPodEnvironmentSecretVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "No PodEnvironmentSecret configured", + envVars: []v1.EnvVar{}, + }, + { + subTest: "Secret referenced by PodEnvironmentSecret does not exist", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: "idonotexist", + }, + }, + err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret not found"), + }, + { + subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret", + opConfig: 
config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretName, + }, + }, + envVars: []v1.EnvVar{ + { + Name: "minio_access_key", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "minio_access_key", + }, + }, + }, + { + Name: "minio_secret_key", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "minio_secret_key", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentSecretVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } + +} + +func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { required := map[string]bool{ "PGHOST": false, "PGPORT": false, @@ -656,6 +864,72 @@ func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil } +func TestNodeAffinity(t *testing.T) { + var err error + var spec acidv1.PostgresSpec + var cluster *Cluster + var spiloRunAsUser = int64(101) + var spiloRunAsGroup = int64(103) + var spiloFSGroup = int64(103) + + makeSpec := func(nodeAffinity *v1.NodeAffinity) acidv1.PostgresSpec { + return acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + NodeAffinity: nodeAffinity, + } + } + + cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + SpiloRunAsUser: &spiloRunAsUser, + SpiloRunAsGroup: &spiloRunAsGroup, + SpiloFSGroup: &spiloFSGroup, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + nodeAff := &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "test-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{ + "test-value", + }, + }, + }, + }, + }, + }, + } + spec = makeSpec(nodeAff) + s, err := cluster.generateStatefulSet(&spec) + if err != nil { + assert.NoError(t, err) + } + + assert.NotNil(t, s.Spec.Template.Spec.Affinity.NodeAffinity, "node affinity in statefulset shouldn't be nil") + assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity") +} + func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { if podSpec.ObjectMeta.Name != "test-pod-template" { return fmt.Errorf("Custom pod template is not used, current spec %+v", @@ -665,110 +939,7 @@ func 
testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error return nil } -func TestConnectionPoolerPodSpec(t *testing.T) { - testName := "Test connection pooler pod template generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - MaxDBConnections: int32ToPointer(60), - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - var clusterNoDefaultRes = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{}, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - expected error - cluster *Cluster - check func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: noCheck, - }, - { - subTest: "no default resources", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), - cluster: clusterNoDefaultRes, - check: noCheck, - }, - { - subTest: "default resources are set", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testResources, - }, - { - subTest: "labels for service", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testLabels, - }, - { - subTest: "required envs", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testEnvs, - }, - } - for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec) - - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } - - err = tt.check(cluster, podSpec) - if err != nil { - t.Errorf("%s [%s]: Pod spec is incorrect, %+v", - testName, tt.subTest, err) - } - } -} - -func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error { +func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error { owner := deployment.ObjectMeta.OwnerReferences[0] if owner.Name != cluster.Statefulset.ObjectMeta.Name { @@ -779,98 +950,7 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme return nil } -func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { - labels := deployment.Spec.Selector.MatchLabels - expected := cluster.connectionPoolerLabelsSelector().MatchLabels - - if 
labels["connection-pooler"] != expected["connection-pooler"] { - return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", - labels, expected) - } - - return nil -} - -func TestConnectionPoolerDeploymentSpec(t *testing.T) { - testName := "Test connection pooler deployment spec generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error { - return nil - } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - expected error - cluster *Cluster - check func(cluster *Cluster, deployment *appsv1.Deployment) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: noCheck, - }, - { - subTest: "owner reference", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testDeploymentOwnwerReference, - }, - { - subTest: "selector", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - expected: nil, - cluster: cluster, - check: testSelector, - }, - } - for _, tt := range tests { - deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec) - - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } - - err = tt.check(cluster, deployment) - if err != nil { - t.Errorf("%s [%s]: Deployment spec is incorrect, %+v", - testName, tt.subTest, err) - } - } -} - -func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error { +func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { owner := service.ObjectMeta.OwnerReferences[0] if owner.Name != cluster.Statefulset.ObjectMeta.Name { @@ -881,90 +961,12 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error { return nil } -func testServiceSelector(cluster *Cluster, service *v1.Service) error { - selector := service.Spec.Selector - - if selector["connection-pooler"] != cluster.connectionPoolerName() { - return fmt.Errorf("Selector is incorrect, got %s, expected %s", - selector["connection-pooler"], cluster.connectionPoolerName()) - } - - return nil -} - -func TestConnectionPoolerServiceSpec(t *testing.T) { - testName := "Test connection pooler service spec generation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, 
eventRecorder) - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - noCheck := func(cluster *Cluster, deployment *v1.Service) error { - return nil - } - - tests := []struct { - subTest string - spec *acidv1.PostgresSpec - cluster *Cluster - check func(cluster *Cluster, deployment *v1.Service) error - }{ - { - subTest: "default configuration", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - cluster: cluster, - check: noCheck, - }, - { - subTest: "owner reference", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - cluster: cluster, - check: testServiceOwnwerReference, - }, - { - subTest: "selector", - spec: &acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - cluster: cluster, - check: testServiceSelector, - }, - } - for _, tt := range tests { - service := tt.cluster.generateConnectionPoolerService(tt.spec) - - if err := tt.check(cluster, service); err != nil { - t.Errorf("%s [%s]: Service spec is incorrect, %+v", - testName, tt.subTest, err) - } - } -} - func TestTLS(t *testing.T) { var err error var spec acidv1.PostgresSpec var cluster *Cluster + var spiloRunAsUser = int64(101) + var spiloRunAsGroup = int64(103) var spiloFSGroup = int64(103) var additionalVolumes = spec.AdditionalVolumes @@ -992,7 +994,9 @@ func TestTLS(t *testing.T) { ReplicationUsername: replicationUserName, }, Resources: config.Resources{ - SpiloFSGroup: &spiloFSGroup, + SpiloRunAsUser: &spiloRunAsUser, + SpiloRunAsGroup: &spiloRunAsGroup, + SpiloFSGroup: &spiloFSGroup, }, }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) @@ -1394,7 +1398,7 @@ func TestSidecars(t *testing.T) { // replaced sidecar // the order in env is important - scalyrEnv := append([]v1.EnvVar{v1.EnvVar{Name: "SCALYR_API_KEY", Value: "abc"}, v1.EnvVar{Name: "SCALYR_SERVER_HOST", Value: ""}}, env...) 
+	scalyrEnv := append(env, v1.EnvVar{Name: "SCALYR_API_KEY", Value: "abc"}, v1.EnvVar{Name: "SCALYR_SERVER_HOST", Value: ""})
 	assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{
 		Name:  "scalyr-sidecar",
 		Image: "scalyr-image",
@@ -1405,3 +1409,83 @@
 	})
 }
+
+func TestGenerateService(t *testing.T) {
+	var spec acidv1.PostgresSpec
+	var cluster *Cluster
+	var enableLB bool = true
+	spec = acidv1.PostgresSpec{
+		TeamID: "myapp", NumberOfInstances: 1,
+		Resources: acidv1.Resources{
+			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+		},
+		Volume: acidv1.Volume{
+			Size: "1G",
+		},
+		Sidecars: []acidv1.Sidecar{
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar",
+			},
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar-with-resources",
+				Resources: acidv1.Resources{
+					ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
+					ResourceLimits:   acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
+				},
+			},
+			acidv1.Sidecar{
+				Name:        "replace-sidecar",
+				DockerImage: "overwrite-image",
+			},
+		},
+		EnableMasterLoadBalancer: &enableLB,
+	}
+
+	cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				ProtectedRoles:      []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				Resources: config.Resources{
+					DefaultCPURequest:    "200m",
+					DefaultCPULimit:      "500m",
+					DefaultMemoryRequest: "0.7Gi",
+					DefaultMemoryLimit:   "1.3Gi",
+				},
+				SidecarImages: map[string]string{
+					"deprecated-global-sidecar": "image:123",
+				},
+				SidecarContainers: []v1.Container{
+					v1.Container{
+						Name: "global-sidecar",
+					},
+					// will be replaced by a cluster specific sidecar with the same name
+					v1.Container{
+						Name:  "replace-sidecar",
+						Image: "replaced-image",
+					},
+				},
+				Scalyr: config.Scalyr{
+					ScalyrAPIKey:        "abc",
+					ScalyrImage:         "scalyr-image",
+					ScalyrCPURequest:    "220m",
+					ScalyrCPULimit:      "520m",
+					ScalyrMemoryRequest: "0.9Gi",
+					// use default memory limit
+				},
+				ExternalTrafficPolicy: "Cluster",
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+
+	service := cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
+	cluster.OpConfig.ExternalTrafficPolicy = "Local"
+	service = cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
+
+}
diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go
index 44b2222e0..a13eb479c 100644
--- a/pkg/cluster/pod.go
+++ b/pkg/cluster/pod.go
@@ -304,9 +304,16 @@ func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {
 	 after this check succeeds but before a pod is re-created
 	*/
+	for _, pod := range pods.Items {
+		c.logger.Debugf("name=%s phase=%s ip=%s", pod.Name, pod.Status.Phase, pod.Status.PodIP)
+	}
+
 	for _, pod := range pods.Items {
 		state, err := c.patroni.GetPatroniMemberState(&pod)
-		if err != nil || state == "creating replica" {
+		if err != nil {
+			c.logger.Errorf("failed to get Patroni state for pod: %s", err)
+			return false
+		} else if state == "creating replica" {
 			c.logger.Warningf("cannot re-create replica %s: it is currently being initialized", pod.Name)
 			return false
 		}
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 3528c46f4..bcc568adc 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@
-94,137 +94,6 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { return statefulSet, nil } -// Prepare the database for connection pooler to be used, i.e. install lookup -// function (do it first, because it should be fast and if it didn't succeed, -// it doesn't makes sense to create more K8S objects. At this moment we assume -// that necessary connection pooler user exists. -// -// After that create all the objects for connection pooler, namely a deployment -// with a chosen pooler and a service to expose it. -func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) { - var msg string - c.setProcessName("creating connection pooler") - - if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - } - - schema := c.Spec.ConnectionPooler.Schema - - if schema == "" { - schema = c.OpConfig.ConnectionPooler.Schema - } - - user := c.Spec.ConnectionPooler.User - if user == "" { - user = c.OpConfig.ConnectionPooler.User - } - - err := lookup(schema, user) - - if err != nil { - msg = "could not prepare database for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return nil, fmt.Errorf(msg, err) - } - - // client-go does retry 10 times (with NoBackoff by default) when the API - // believe a request can be retried and returns Retry-After header. This - // should be good enough to not think about it here. - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). - Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - serviceSpec := c.generateConnectionPoolerService(&c.Spec) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). - Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return nil, err - } - - c.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: deployment, - Service: service, - } - c.logger.Debugf("created new connection pooler %q, uid: %q", - util.NameFromMeta(deployment.ObjectMeta), deployment.UID) - - return c.ConnectionPooler, nil -} - -func (c *Cluster) deleteConnectionPooler() (err error) { - c.setProcessName("deleting connection pooler") - c.logger.Debugln("deleting connection pooler") - - // Lack of connection pooler objects is not a fatal error, just log it if - // it was present before in the manifest - if c.ConnectionPooler == nil { - c.logger.Infof("No connection pooler to delete") - return nil - } - - // Clean up the deployment object. If deployment resource we've remembered - // is somehow empty, try to delete based on what would we generate - deploymentName := c.connectionPoolerName() - deployment := c.ConnectionPooler.Deployment - - if deployment != nil { - deploymentName = deployment.Name - } - - // set delete propagation policy to foreground, so that replica set will be - // also deleted. - policy := metav1.DeletePropagationForeground - options := metav1.DeleteOptions{PropagationPolicy: &policy} - err = c.KubeClient. - Deployments(c.Namespace). 
- Delete(context.TODO(), deploymentName, options) - - if !k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler deployment was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete deployment: %v", err) - } - - c.logger.Infof("Connection pooler deployment %q has been deleted", deploymentName) - - // Repeat the same for the service object - service := c.ConnectionPooler.Service - serviceName := c.connectionPoolerName() - - if service != nil { - serviceName = service.Name - } - - // set delete propagation policy to foreground, so that all the dependant - // will be deleted. - err = c.KubeClient. - Services(c.Namespace). - Delete(context.TODO(), serviceName, options) - - if !k8sutil.ResourceNotFound(err) { - c.logger.Debugf("Connection pooler service was already deleted") - } else if err != nil { - return fmt.Errorf("could not delete service: %v", err) - } - - c.logger.Infof("Connection pooler service %q has been deleted", serviceName) - - c.ConnectionPooler = nil - return nil -} - func getPodIndex(podName string) (int32, error) { parts := strings.Split(podName, "-") if len(parts) == 0 { @@ -280,7 +149,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { // setRollingUpdateFlagForStatefulSet sets the indicator or the rolling update requirement // in the StatefulSet annotation. -func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool) { +func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) { anno := sset.GetAnnotations() if anno == nil { anno = make(map[string]string) @@ -288,13 +157,13 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, v anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val) sset.SetAnnotations(anno) - c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val) + c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg) } // applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet // and applies that setting to the actual running cluster. 
func (c *Cluster) applyRollingUpdateFlagforStatefulSet(val bool) error { - c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val) + c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val, "applyRollingUpdateFlag") sset, err := c.updateStatefulSetAnnotations(c.Statefulset.GetAnnotations()) if err != nil { return err @@ -346,14 +215,13 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *appsv1.St podsRollingUpdateRequired = false } else { c.logger.Infof("found a statefulset with an unfinished rolling update of the pods") - } } return podsRollingUpdateRequired } func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) { - c.logger.Debugf("updating statefulset annotations") + c.logger.Debugf("patching statefulset annotations") patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err) @@ -725,17 +593,37 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error { return nil } -func (c *Cluster) deleteSecret(secret *v1.Secret) error { - c.setProcessName("deleting secret %q", util.NameFromMeta(secret.ObjectMeta)) - c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta)) +func (c *Cluster) deleteSecrets() error { + c.setProcessName("deleting secrets") + var errors []string + errorCount := 0 + for uid, secret := range c.Secrets { + err := c.deleteSecret(uid, *secret) + if err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + errorCount++ + } + } + + if errorCount > 0 { + return fmt.Errorf("could not delete all secrets: %v", errors) + } + + return nil +} + +func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error { + c.setProcessName("deleting secret") + secretName := util.NameFromMeta(secret.ObjectMeta) + c.logger.Debugf("deleting secret %q", secretName) err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions) if err != nil { - return err + return fmt.Errorf("could not delete secret %q: %v", secretName, err) } - c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta)) - delete(c.Secrets, secret.UID) + c.logger.Infof("secret %q has been deleted", secretName) + c.Secrets[uid] = nil - return err + return nil } func (c *Cluster) createRoles() (err error) { @@ -820,57 +708,3 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet { func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { return c.PodDisruptionBudget } - -// Perform actual patching of a connection pooler deployment, assuming that all -// the check were already done before. -func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { - c.setProcessName("updating connection pooler") - if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment == nil { - return nil, fmt.Errorf("there is no connection pooler in the cluster") - } - - patchData, err := specPatch(newDeployment.Spec) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment: %v", err) - } - - // An update probably requires RetryOnConflict, but since only one operator - // worker at one time will try to update it chances of conflicts are - // minimal. - deployment, err := c.KubeClient. 
- Deployments(c.ConnectionPooler.Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler.Deployment.Name, - types.MergePatchType, - patchData, - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch deployment: %v", err) - } - - c.ConnectionPooler.Deployment = deployment - - return deployment, nil -} - -//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func (c *Cluster) updateConnectionPoolerAnnotations(annotations map[string]string) (*appsv1.Deployment, error) { - c.logger.Debugf("updating connection pooler annotations") - patchData, err := metaAnnotationsPatch(annotations) - if err != nil { - return nil, fmt.Errorf("could not form patch for the deployment metadata: %v", err) - } - result, err := c.KubeClient.Deployments(c.ConnectionPooler.Deployment.Namespace).Patch( - context.TODO(), - c.ConnectionPooler.Deployment.Name, - types.MergePatchType, - []byte(patchData), - metav1.PatchOptions{}, - "") - if err != nil { - return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) - } - return result, nil - -} diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go deleted file mode 100644 index 9739cc354..000000000 --- a/pkg/cluster/resources_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package cluster - -import ( - "testing" - - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/util/config" - "github.com/zalando/postgres-operator/pkg/util/k8sutil" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func mockInstallLookupFunction(schema string, user string) error { - return nil -} - -func boolToPointer(value bool) *bool { - return &value -} - -func TestConnectionPoolerCreationAndDeletion(t *testing.T) { - testName := "Test connection pooler creation" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction) - - if err != nil { - t.Errorf("%s: Cannot create connection pooler, %s, %+v", - testName, err, poolerResources) - } - - if poolerResources.Deployment == nil { - t.Errorf("%s: Connection pooler deployment is empty", testName) - } - - if poolerResources.Service == nil { - t.Errorf("%s: Connection pooler service is empty", testName) - } - - err = cluster.deleteConnectionPooler() - if err != nil { - t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) - } -} - -func TestNeedConnectionPooler(t *testing.T) { - testName := "Test how connection pooler can be enabled" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - 
ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - }, - }, - }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Spec = acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if !cluster.needConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with full definition", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - } - - if !cluster.needConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with flag", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(false), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if cluster.needConnectionPooler() { - t.Errorf("%s: Connection pooler is still enabled with flag being false", - testName) - } - - cluster.Spec = acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - } - - if !cluster.needConnectionPooler() { - t.Errorf("%s: Connection pooler is not enabled with flag and full", - testName) - } -} diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 697fc2d05..dc54ae8ee 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -11,7 +11,6 @@ import ( "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" - "github.com/zalando/postgres-operator/pkg/util/volumes" appsv1 "k8s.io/api/apps/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/api/core/v1" @@ -32,9 +31,9 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { defer func() { if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - c.setStatus(acidv1.ClusterStatusSyncFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed) } else if !c.Status.Running() { - c.setStatus(acidv1.ClusterStatusRunning) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) } }() @@ -43,30 +42,52 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return err } - c.logger.Debugf("syncing secrets") - //TODO: mind the secrets of the deleted/new users if err = c.syncSecrets(); err != nil { err = fmt.Errorf("could not sync secrets: %v", err) return err } - c.logger.Debugf("syncing services") if err = c.syncServices(); err != nil { err = fmt.Errorf("could not sync services: %v", err) return err } - // potentially enlarge volumes before changing the statefulset. By doing that - // in this order we make sure the operator is not stuck waiting for a pod that - // cannot start because it ran out of disk space. 
-	// TODO: handle the case of the cluster that is downsized and enlarged again
-	// (there will be a volume from the old pod for which we can't act before the
-	// the statefulset modification is concluded)
-	c.logger.Debugf("syncing persistent volumes")
-	if err = c.syncVolumes(); err != nil {
-		err = fmt.Errorf("could not sync persistent volumes: %v", err)
-		return err
+	c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
+
+	if c.OpConfig.EnableEBSGp3Migration {
+		err = c.executeEBSMigration()
+		if err != nil {
+			return err
+		}
+	}
+
+	if c.OpConfig.StorageResizeMode == "mixed" {
+		// mixed mode uses the AWS API to adjust size, throughput and IOPS,
+		// and updates the PVC to trigger the file system resize
+
+		// resize the PVCs to adjust the file system size until K8s gains better support
+		if err = c.syncVolumeClaims(); err != nil {
+			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
+			return err
+		}
+	} else if c.OpConfig.StorageResizeMode == "pvc" {
+		if err = c.syncVolumeClaims(); err != nil {
+			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
+			return err
+		}
+	} else if c.OpConfig.StorageResizeMode == "ebs" {
+		// potentially enlarge volumes before changing the statefulset. By doing that
+		// in this order we make sure the operator is not stuck waiting for a pod that
+		// cannot start because it ran out of disk space.
+		// TODO: handle the case of the cluster that is downsized and enlarged again
+		// (there will be a volume from the old pod for which we can't act before
+		// the statefulset modification is concluded)
+		if err = c.syncVolumes(); err != nil {
+			err = fmt.Errorf("could not sync persistent volumes: %v", err)
+			return err
+		}
+	} else {
+		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
 	}
 
 	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
@@ -322,29 +343,17 @@ func (c *Cluster) syncStatefulSet() error {
 	// statefulset is already there, make sure we use its definition in order to compare with the spec.
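For review purposes, the mode dispatch that `Sync` gains above can be restated in isolation: `mixed` and `pvc` both go through the PVC-based resize (with `mixed` additionally driving the AWS API for size, throughput and IOPS), `ebs` resizes the volumes directly before the statefulset changes, and any other value skips the volume sync. Below is a minimal, self-contained sketch of that decision, not operator code: `syncStorage` is a hypothetical name and the stub functions stand in for the cluster's `syncVolumeClaims` and `syncVolumes` methods.

```go
package main

import "fmt"

// Hypothetical stand-ins for (*Cluster).syncVolumeClaims and (*Cluster).syncVolumes.
func syncVolumeClaims() error { return nil }
func syncVolumes() error      { return nil }

// syncStorage sketches the storage_resize_mode dispatch from the hunk above.
func syncStorage(mode string) error {
	switch mode {
	case "mixed", "pvc":
		// both modes resize the PVCs; "mixed" additionally adjusts size,
		// throughput and IOPS through the AWS API
		if err := syncVolumeClaims(); err != nil {
			return fmt.Errorf("could not sync persistent volume claims: %v", err)
		}
	case "ebs":
		// enlarge the EBS volumes before the statefulset changes so pods
		// never wait on a disk that is too small
		if err := syncVolumes(); err != nil {
			return fmt.Errorf("could not sync persistent volumes: %v", err)
		}
	default:
		fmt.Println("storage resize is disabled (storage_resize_mode is off), skipping volume sync")
	}
	return nil
}

func main() {
	for _, mode := range []string{"mixed", "pvc", "ebs", "off"} {
		fmt.Printf("mode %q: %v\n", mode, syncStorage(mode))
	}
}
```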
c.Statefulset = sset - // check if there is no Postgres version mismatch - for _, container := range c.Statefulset.Spec.Template.Spec.Containers { - if container.Name != "postgres" { - continue - } - pgVersion, err := c.getNewPgVersion(container, c.Spec.PostgresqlParam.PgVersion) - if err != nil { - return fmt.Errorf("could not parse current Postgres version: %v", err) - } - c.Spec.PostgresqlParam.PgVersion = pgVersion - } - desiredSS, err := c.generateStatefulSet(&c.Spec) if err != nil { return fmt.Errorf("could not generate statefulset: %v", err) } - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "from cache") cmp := c.compareStatefulSetWith(desiredSS) if !cmp.match { if cmp.rollingUpdate && !podsRollingUpdateRequired { podsRollingUpdateRequired = true - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "statefulset changes") } c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons) @@ -359,8 +368,8 @@ func (c *Cluster) syncStatefulSet() error { } } } - annotations := c.AnnotationsToPropagate(c.Statefulset.Annotations) - c.updateStatefulSetAnnotations(annotations) + + c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations))) if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade { // even if desired and actual statefulsets match @@ -403,11 +412,15 @@ func (c *Cluster) syncStatefulSet() error { // AnnotationsToPropagate get the annotations to update if required // based on the annotations in postgres CRD func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string { - toPropagateAnnotations := c.OpConfig.DownscalerAnnotations - pgCRDAnnotations := c.Postgresql.ObjectMeta.GetAnnotations() - if toPropagateAnnotations != nil && pgCRDAnnotations != nil { - for _, anno := range toPropagateAnnotations { + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + if pgCRDAnnotations != nil { + for _, anno := range c.OpConfig.DownscalerAnnotations { for k, v := range pgCRDAnnotations { matched, err := regexp.MatchString(anno, k) if err != nil { @@ -421,7 +434,11 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri } } - return annotations + if len(annotations) > 0 { + return annotations + } + + return nil } // checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters @@ -472,6 +489,7 @@ func (c *Cluster) syncSecrets() error { err error secret *v1.Secret ) + c.logger.Info("syncing secrets") c.setProcessName("syncing secrets") secrets := c.generateUserSecrets() @@ -487,10 +505,11 @@ func (c *Cluster) syncSecrets() error { return fmt.Errorf("could not get current secret: %v", err) } if secretUsername != string(secret.Data["username"]) { - c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername) + c.logger.Errorf("secret %s does not contain the role %q", secretSpec.Name, secretUsername) continue } - c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) + c.Secrets[secret.UID] = secret + c.logger.Debugf("secret %s already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name { secretUsername = 
constants.SuperuserKeyName userMap = c.systemUsers @@ -549,7 +568,7 @@ func (c *Cluster) syncRoles() (err error) { userNames = append(userNames, u.Name) } - if c.needConnectionPooler() { + if needMasterConnectionPooler(&c.Spec) || needReplicaConnectionPooler(&c.Spec) { connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName] userNames = append(userNames, connectionPoolerUser.Name) @@ -571,6 +590,27 @@ func (c *Cluster) syncRoles() (err error) { return nil } +// syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset. +func (c *Cluster) syncVolumeClaims() error { + c.setProcessName("syncing volume claims") + + act, err := c.volumeClaimsNeedResizing(c.Spec.Volume) + if err != nil { + return fmt.Errorf("could not compare size of the volume claims: %v", err) + } + if !act { + c.logger.Infof("volume claims do not require changes") + return nil + } + if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil { + return fmt.Errorf("could not sync volume claims: %v", err) + } + + c.logger.Infof("volume claims have been synced successfully") + + return nil +} + // syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset. func (c *Cluster) syncVolumes() error { c.setProcessName("syncing volumes") @@ -582,7 +622,8 @@ func (c *Cluster) syncVolumes() error { if !act { return nil } - if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{AWSRegion: c.OpConfig.AWSRegion}}); err != nil { + + if err := c.resizeVolumes(); err != nil { return fmt.Errorf("could not sync volumes: %v", err) } @@ -664,12 +705,8 @@ func (c *Cluster) syncPreparedDatabases() error { if err := c.initDbConnWithName(preparedDbName); err != nil { return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err) } - defer func() { - if err := c.closeDbConn(); err != nil { - c.logger.Errorf("could not close database connection: %v", err) - } - }() + c.logger.Debugf("syncing prepared database %q", preparedDbName) // now, prepare defined schemas preparedSchemas := preparedDB.PreparedSchemas if len(preparedDB.PreparedSchemas) == 0 { @@ -683,6 +720,10 @@ func (c *Cluster) syncPreparedDatabases() error { if err := c.syncExtensions(preparedDB.Extensions); err != nil { return err } + + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } } return nil @@ -772,7 +813,7 @@ func (c *Cluster) syncLogicalBackupJob() error { return fmt.Errorf("could not generate the desired logical backup job state: %v", err) } if match, reason := k8sutil.SameLogicalBackupJob(job, desiredJob); !match { - c.logger.Infof("logical job %q is not in the desired state and needs to be updated", + c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.getLogicalBackupJobName(), ) if reason != "" { @@ -793,12 +834,12 @@ func (c *Cluster) syncLogicalBackupJob() error { c.logger.Info("could not find the cluster's logical backup job") if err = c.createLogicalBackupJob(); err == nil { - c.logger.Infof("created missing logical backup job %q", jobName) + c.logger.Infof("created missing logical backup job %s", jobName) } else { if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create missing logical backup job: %v", err) } - c.logger.Infof("logical backup job %q already exists", jobName) + c.logger.Infof("logical backup job %s already exists", jobName) if _, err = 
c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing logical backup job: %v", err) } @@ -806,196 +847,3 @@ func (c *Cluster) syncLogicalBackupJob() error { return nil } - -func (c *Cluster) syncConnectionPooler(oldSpec, - newSpec *acidv1.Postgresql, - lookup InstallFunction) (SyncReason, error) { - - var reason SyncReason - var err error - - if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} - } - - newNeedConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec) - oldNeedConnectionPooler := c.needConnectionPoolerWorker(&oldSpec.Spec) - - if newNeedConnectionPooler { - // Try to sync in any case. If we didn't needed connection pooler before, - // it means we want to create it. If it was already present, still sync - // since it could happen that there is no difference in specs, and all - // the resources are remembered, but the deployment was manually deleted - // in between - c.logger.Debug("syncing connection pooler") - - // in this case also do not forget to install lookup function as for - // creating cluster - if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction { - newConnectionPooler := newSpec.Spec.ConnectionPooler - - specSchema := "" - specUser := "" - - if newConnectionPooler != nil { - specSchema = newConnectionPooler.Schema - specUser = newConnectionPooler.User - } - - schema := util.Coalesce( - specSchema, - c.OpConfig.ConnectionPooler.Schema) - - user := util.Coalesce( - specUser, - c.OpConfig.ConnectionPooler.User) - - if err = lookup(schema, user); err != nil { - return NoSync, err - } - } - - if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil { - c.logger.Errorf("could not sync connection pooler: %v", err) - return reason, err - } - } - - if oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources - if err = c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } - - if !oldNeedConnectionPooler && !newNeedConnectionPooler { - // delete and cleanup resources if not empty - if c.ConnectionPooler != nil && - (c.ConnectionPooler.Deployment != nil || - c.ConnectionPooler.Service != nil) { - - if err = c.deleteConnectionPooler(); err != nil { - c.logger.Warningf("could not remove connection pooler: %v", err) - } - } - } - - return reason, nil -} - -// Synchronize connection pooler resources. Effectively we're interested only in -// synchronizing the corresponding deployment, but in case of deployment or -// service is missing, create it. After checking, also remember an object for -// the future references. -func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) ( - SyncReason, error) { - - deployment, err := c.KubeClient. - Deployments(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Deployment %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName()) - - deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec) - if err != nil { - msg = "could not generate deployment for connection pooler: %v" - return NoSync, fmt.Errorf(msg, err) - } - - deployment, err := c.KubeClient. - Deployments(deploymentSpec.Namespace). 
- Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - - c.ConnectionPooler.Deployment = deployment - } else if err != nil { - msg := "could not get connection pooler deployment to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - c.ConnectionPooler.Deployment = deployment - - // actual synchronization - oldConnectionPooler := oldSpec.Spec.ConnectionPooler - newConnectionPooler := newSpec.Spec.ConnectionPooler - - // sync implementation below assumes that both old and new specs are - // not nil, but it can happen. To avoid any confusion like updating a - // deployment because the specification changed from nil to an empty - // struct (that was initialized somewhere before) replace any nil with - // an empty spec. - if oldConnectionPooler == nil { - oldConnectionPooler = &acidv1.ConnectionPooler{} - } - - if newConnectionPooler == nil { - newConnectionPooler = &acidv1.ConnectionPooler{} - } - - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) - - specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) - defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) - reason := append(specReason, defaultsReason...) - if specSync || defaultsSync { - c.logger.Infof("Update connection pooler deployment %s, reason: %+v", - c.connectionPoolerName(), reason) - - newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec) - if err != nil { - msg := "could not generate deployment for connection pooler: %v" - return reason, fmt.Errorf(msg, err) - } - - oldDeploymentSpec := c.ConnectionPooler.Deployment - - deployment, err := c.updateConnectionPoolerDeployment( - oldDeploymentSpec, - newDeploymentSpec) - - if err != nil { - return reason, err - } - - c.ConnectionPooler.Deployment = deployment - return reason, nil - } - } - - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler.Deployment.Annotations) - if newAnnotations != nil { - c.updateConnectionPoolerAnnotations(newAnnotations) - } - - service, err := c.KubeClient. - Services(c.Namespace). - Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{}) - - if err != nil && k8sutil.ResourceNotFound(err) { - msg := "Service %s for connection pooler synchronization is not found, create it" - c.logger.Warningf(msg, c.connectionPoolerName()) - - serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec) - service, err := c.KubeClient. - Services(serviceSpec.Namespace). 
- Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) - - if err != nil { - return NoSync, err - } - - c.ConnectionPooler.Service = service - } else if err != nil { - msg := "could not get connection pooler service to sync: %v" - return NoSync, fmt.Errorf(msg, err) - } else { - // Service updates are not supported and probably not that useful anyway - c.ConnectionPooler.Service = service - } - - return NoSync, nil -} diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go deleted file mode 100644 index 3a7317938..000000000 --- a/pkg/cluster/sync_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package cluster - -import ( - "fmt" - "strings" - "testing" - - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/util/config" - "github.com/zalando/postgres-operator/pkg/util/k8sutil" - - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func int32ToPointer(value int32) *int32 { - return &value -} - -func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler.Deployment.Spec.Replicas == nil || - *cluster.ConnectionPooler.Deployment.Spec.Replicas != 2 { - return fmt.Errorf("Wrong nubmer of instances") - } - - return nil -} - -func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler == nil { - return fmt.Errorf("Connection pooler resources are empty") - } - - if cluster.ConnectionPooler.Deployment == nil { - return fmt.Errorf("Deployment was not saved") - } - - if cluster.ConnectionPooler.Service == nil { - return fmt.Errorf("Service was not saved") - } - - return nil -} - -func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { - if cluster.ConnectionPooler != nil { - return fmt.Errorf("Connection pooler was not deleted") - } - - return nil -} - -func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { - for _, msg := range reason { - if strings.HasPrefix(msg, "update [] from '' to '") { - return fmt.Errorf("There is an empty reason, %s", msg) - } - } - - return nil -} - -func TestConnectionPoolerSynchronization(t *testing.T) { - testName := "Test connection pooler synchronization" - var cluster = New( - Config{ - OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, - Auth: config.Auth{ - SuperUsername: superUserName, - ReplicationUsername: replicationUserName, - }, - ConnectionPooler: config.ConnectionPooler{ - ConnectionPoolerDefaultCPURequest: "100m", - ConnectionPoolerDefaultCPULimit: "100m", - ConnectionPoolerDefaultMemoryRequest: "100Mi", - ConnectionPoolerDefaultMemoryLimit: "100Mi", - NumberOfInstances: int32ToPointer(1), - }, - }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) - - cluster.Statefulset = &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-sts", - }, - } - - clusterMissingObjects := *cluster - clusterMissingObjects.KubeClient = k8sutil.ClientMissingObjects() - - clusterMock := *cluster - clusterMock.KubeClient = k8sutil.NewMockKubernetesClient() - - clusterDirtyMock := *cluster - clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient() - clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{ - Deployment: &appsv1.Deployment{}, - Service: &v1.Service{}, - } - - clusterNewDefaultsMock := *cluster - clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient() - - tests := []struct { - subTest string - oldSpec *acidv1.Postgresql - newSpec 
*acidv1.Postgresql - cluster *Cluster - defaultImage string - defaultInstances int32 - check func(cluster *Cluster, err error, reason SyncReason) error - }{ - { - subTest: "create if doesn't exist", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: &clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreSaved, - }, - { - subTest: "create if doesn't exist with a flag", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - }, - }, - cluster: &clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreSaved, - }, - { - subTest: "create from scratch", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: &clusterMissingObjects, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreSaved, - }, - { - subTest: "delete if not needed", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - cluster: &clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreDeleted, - }, - { - subTest: "cleanup if still there", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{}, - }, - cluster: &clusterDirtyMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: objectsAreDeleted, - }, - { - subTest: "update deployment", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(1), - }, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{ - NumberOfInstances: int32ToPointer(2), - }, - }, - }, - cluster: &clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: deploymentUpdated, - }, - { - subTest: "update image from changed defaults", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: &clusterNewDefaultsMock, - defaultImage: "pooler:2.0", - defaultInstances: 2, - check: deploymentUpdated, - }, - { - subTest: "there is no sync from nil to an empty spec", - oldSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: nil, - }, - }, - newSpec: &acidv1.Postgresql{ - Spec: acidv1.PostgresSpec{ - EnableConnectionPooler: boolToPointer(true), - ConnectionPooler: &acidv1.ConnectionPooler{}, - }, - }, - cluster: &clusterMock, - defaultImage: "pooler:1.0", - defaultInstances: 1, - check: noEmptySync, - }, - } - for _, tt := range tests { - tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage - tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances = - int32ToPointer(tt.defaultInstances) - - reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec, - tt.newSpec, mockInstallLookupFunction) - - if err := 
tt.check(tt.cluster, err, reason); err != nil {
-			t.Errorf("%s [%s]: Could not synchronize, %+v",
-				testName, tt.subTest, err)
-		}
-	}
-}
diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go
index 199914ccc..8aa519817 100644
--- a/pkg/cluster/types.go
+++ b/pkg/cluster/types.go
@@ -72,7 +72,7 @@ type ClusterStatus struct {
 
 type TemplateParams map[string]interface{}
 
-type InstallFunction func(schema string, user string) error
+type InstallFunction func(schema string, user string, role PostgresRole) error
 
 type SyncReason []string
diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go
index 7559ce3d4..d5e887656 100644
--- a/pkg/cluster/util.go
+++ b/pkg/cluster/util.go
@@ -18,12 +18,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 
+	"github.com/sirupsen/logrus"
 	acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+	"github.com/zalando/postgres-operator/pkg/util/nicediff"
 	"github.com/zalando/postgres-operator/pkg/util/retryutil"
 )
@@ -166,40 +168,59 @@ func (c *Cluster) logPDBChanges(old, new *policybeta1.PodDisruptionBudget, isUpd
 		)
 	}
 
-	c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
+	logNiceDiff(c.logger, old.Spec, new.Spec)
+}
+
+func logNiceDiff(log *logrus.Entry, old, new interface{}) {
+	o, erro := json.MarshalIndent(old, "", "  ")
+	n, errn := json.MarshalIndent(new, "", "  ")
+
+	if erro != nil || errn != nil {
+		panic("could not marshal API objects, should not happen")
+	}
+
+	nice := nicediff.Diff(string(o), string(n), true)
+	for _, s := range strings.Split(nice, "\n") {
+		// quotation marks are not needed to understand the values
+		log.Debug(strings.ReplaceAll(s, "\"", ""))
+	}
 }
 
 func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate bool, reasons []string) {
 	if isUpdate {
-		c.logger.Infof("statefulset %q has been changed", util.NameFromMeta(old.ObjectMeta))
+		c.logger.Infof("statefulset %s has been changed", util.NameFromMeta(old.ObjectMeta))
 	} else {
-		c.logger.Infof("statefulset %q is not in the desired state and needs to be updated",
+		c.logger.Infof("statefulset %s is not in the desired state and needs to be updated",
 			util.NameFromMeta(old.ObjectMeta),
 		)
 	}
+
+	logNiceDiff(c.logger, old.Spec, new.Spec)
+
 	if !reflect.DeepEqual(old.Annotations, new.Annotations) {
-		c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations))
+		c.logger.Debugf("metadata.annotations are different")
+		logNiceDiff(c.logger, old.Annotations, new.Annotations)
 	}
-	c.logger.Debugf("spec diff between old and new statefulsets: \n%s\n", util.PrettyDiff(old.Spec, new.Spec))
 
 	if len(reasons) > 0 {
 		for _, reason := range reasons {
-			c.logger.Infof("reason: %q", reason)
+			c.logger.Infof("reason: %s", reason)
 		}
 	}
 }
 
 func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) {
 	if isUpdate {
-		c.logger.Infof("%s service %q has been changed",
+		c.logger.Infof("%s service %s has been changed",
 			role, util.NameFromMeta(old.ObjectMeta),
 		)
 	} else {
-		c.logger.Infof("%s service %q is not in the desired state and needs to be updated",
+		c.logger.Infof("%s service %s is not in the desired state and needs to be updated",
 			role, util.NameFromMeta(old.ObjectMeta),
 		)
 	}
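The `logNiceDiff` helper introduced in this hunk replaces `util.PrettyDiff` throughout: both objects are marshalled to indented JSON and the diff is logged line by line with quotation marks stripped. Here is a rough, self-contained illustration of the idea; `logNiceDiffSketch` is a hypothetical name and `naiveDiff` is a deliberately simplified stand-in for the repo's `nicediff` package.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// logNiceDiffSketch mirrors logNiceDiff above: marshal old and new to
// indented JSON, diff line by line, and strip quotes for readability.
func logNiceDiffSketch(old, new interface{}) {
	o, erro := json.MarshalIndent(old, "", "  ")
	n, errn := json.MarshalIndent(new, "", "  ")
	if erro != nil || errn != nil {
		panic("could not marshal API objects, should not happen")
	}
	for _, s := range naiveDiff(strings.Split(string(o), "\n"), strings.Split(string(n), "\n")) {
		fmt.Println(strings.ReplaceAll(s, "\"", ""))
	}
}

// naiveDiff reports lines that appear only on one side; the real nicediff
// package produces a proper line-oriented diff instead.
func naiveDiff(o, n []string) []string {
	inOld := make(map[string]bool, len(o))
	for _, l := range o {
		inOld[l] = true
	}
	inNew := make(map[string]bool, len(n))
	for _, l := range n {
		inNew[l] = true
	}
	var out []string
	for _, l := range o {
		if !inNew[l] {
			out = append(out, "- "+l)
		}
	}
	for _, l := range n {
		if !inOld[l] {
			out = append(out, "+ "+l)
		}
	}
	return out
}

func main() {
	type spec struct{ Replicas, Size int }
	logNiceDiffSketch(spec{Replicas: 1, Size: 10}, spec{Replicas: 2, Size: 10})
	// prints:
	// -   Replicas: 1,
	// +   Replicas: 2,
}
```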
- c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec)) + + logNiceDiff(c.logger, old.Spec, new.Spec) if reason != "" { c.logger.Infof("reason: %s", reason) @@ -208,7 +229,7 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) { c.logger.Infof("volume specification has been changed") - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new)) + logNiceDiff(c.logger, old, new) } func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { @@ -217,24 +238,64 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { return nil, fmt.Errorf("no teamId specified") } + c.logger.Debugf("fetching possible additional team members for team %q", teamID) + members := []string{} + additionalMembers := c.PgTeamMap[c.Spec.TeamID].AdditionalMembers + for _, member := range additionalMembers { + members = append(members, member) + } + if !c.OpConfig.EnableTeamsAPI { - c.logger.Debugf("team API is disabled, returning empty list of members for team %q", teamID) - return []string{}, nil + c.logger.Debugf("team API is disabled, only returning %d members for team %q", len(members), teamID) + return members, nil } token, err := c.oauthTokenGetter.getOAuthToken() if err != nil { - c.logger.Warnf("could not get oauth token to authenticate to team service API, returning empty list of team members: %v", err) - return []string{}, nil + c.logger.Warnf("could not get oauth token to authenticate to team service API, only returning %d members for team %q: %v", len(members), teamID, err) + return members, nil } teamInfo, err := c.teamsAPIClient.TeamInfo(teamID, token) if err != nil { - c.logger.Warnf("could not get team info for team %q, returning empty list of team members: %v", teamID, err) - return []string{}, nil + c.logger.Warnf("could not get team info for team %q, only returning %d members: %v", teamID, len(members), err) + return members, nil } - return teamInfo.Members, nil + for _, member := range teamInfo.Members { + if !(util.SliceContains(members, member)) { + members = append(members, member) + } + } + + return members, nil +} + +// Returns annotations to be passed to child objects +func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string { + + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + // allow to inherit certain labels from the 'postgres' object + if pgCRDAnnotations != nil { + for k, v := range pgCRDAnnotations { + for _, match := range c.OpConfig.InheritedAnnotations { + if k == match { + annotations[k] = v + } + } + } + } + + if len(annotations) > 0 { + return annotations + } + + return nil } func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { @@ -415,28 +476,6 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector { } } -// Return connection pooler labels selector, which should from one point of view -// inherit most of the labels from the cluster itself, but at the same time -// have e.g. different `application` label, so that recreatePod operation will -// not interfere with it (it lists all the pods via labels, and if there would -// be no difference, it will recreate also pooler pods). 
-func (c *Cluster) connectionPoolerLabelsSelector() *metav1.LabelSelector { - connectionPoolerLabels := labels.Set(map[string]string{}) - - extraLabels := labels.Set(map[string]string{ - "connection-pooler": c.connectionPoolerName(), - "application": "db-connection-pooler", - }) - - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false)) - connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels) - - return &metav1.LabelSelector{ - MatchLabels: connectionPoolerLabels, - MatchExpressions: nil, - } -} - func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) labels.Set { lbls := c.labelsSet(shouldAddExtraLabels) lbls[c.OpConfig.PodRoleLabel] = string(role) @@ -519,18 +558,6 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool { return c.OpConfig.KubernetesUseConfigMaps } -func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { - if spec.EnableConnectionPooler == nil { - return spec.ConnectionPooler != nil - } else { - return *spec.EnableConnectionPooler - } -} - -func (c *Cluster) needConnectionPooler() bool { - return c.needConnectionPoolerWorker(&c.Spec) -} - // Earlier arguments take priority func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) { containerNameTaken := map[string]bool{} diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go new file mode 100644 index 000000000..7afc59f28 --- /dev/null +++ b/pkg/cluster/util_test.go @@ -0,0 +1,141 @@ +package cluster + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sFake "k8s.io/client-go/kubernetes/fake" +) + +func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { + clientSet := k8sFake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PodDisruptionBudgetsGetter: clientSet.PolicyV1beta1(), + ServicesGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + }, clientSet +} + +func TestInheritedAnnotations(t *testing.T) { + testName := "test inheriting annotations from manifest" + client, _ := newFakeK8sAnnotationsClient() + clusterName := "acid-test-cluster" + namespace := "default" + annotationValue := "acid" + role := Master + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "owned-by": annotationValue, + }, + }, + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + 
DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + InheritedAnnotations: []string{"owned-by"}, + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = clusterName + cluster.Namespace = namespace + + // test annotationsSet function + inheritedAnnotations := cluster.annotationsSet(nil) + + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(false).String(), + } + + // check statefulset annotations + _, err := cluster.createStatefulSet() + assert.NoError(t, err) + + stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, sts := range stsList.Items { + if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: StatefulSet %v did not inherit annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + // pod template + if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: pod template of %v did not inherit annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.Spec.Template.ObjectMeta.Annotations) + } + // pvc template + if util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations) { + t.Errorf("%s: PVC template of %v is not expected to have inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.Spec.VolumeClaimTemplates[0].Annotations) + } + } + + // check service annotations + cluster.createService(Master) + svcList, err := client.Services(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, svc := range svcList.Items { + if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Service %v did not inherit annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations) + } + } + + // check pod disruption budget annotations + cluster.createPodDisruptionBudget() + pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, pdb := range pdbList.Items { + if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Pod Disruption Budget %v did not inherit annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations) + } + } + + // check pooler deployment annotations + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} + cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{ + Name: cluster.connectionPoolerName(role), + ClusterName: cluster.ClusterName, + Namespace: cluster.Namespace, + Role: role, + } + deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role]) + assert.NoError(t, err) + + if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Deployment %v did not inherit annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations) + } + +} diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index a5bfe6c2d..162d24075 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -15,7 +15,6 @@ import ( "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/filesystems" - "github.com/zalando/postgres-operator/pkg/util/volumes" ) func (c *Cluster) listPersistentVolumeClaims()
([]v1.PersistentVolumeClaim, error) { @@ -52,6 +51,35 @@ func (c *Cluster) deletePersistentVolumeClaims() error { return nil } +func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error { + c.logger.Debugln("resizing PVCs") + pvcs, err := c.listPersistentVolumeClaims() + if err != nil { + return err + } + newQuantity, err := resource.ParseQuantity(newVolume.Size) + if err != nil { + return fmt.Errorf("could not parse volume size: %v", err) + } + newSize := quantityToGigabyte(newQuantity) + for _, pvc := range pvcs { + volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) + if volumeSize >= newSize { + if volumeSize > newSize { + c.logger.Warningf("cannot shrink persistent volume") + } + continue + } + pvc.Spec.Resources.Requests[v1.ResourceStorage] = newQuantity + c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name) + if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("could not update persistent volume claim: %q", err) + } + c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name) + } + return nil +} + func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { result := make([]*v1.PersistentVolume, 0) @@ -90,19 +118,26 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { } // resizeVolumes resizes persistent volumes compatible with the configured resizer interface -func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.VolumeResizer) error { - c.setProcessName("resizing volumes") +func (c *Cluster) resizeVolumes() error { + if c.VolumeResizer == nil { + return fmt.Errorf("no volume resizer set for EBS volume handling") + } + c.setProcessName("resizing EBS volumes") + + resizer := c.VolumeResizer var totalIncompatible int - newQuantity, err := resource.ParseQuantity(newVolume.Size) + newQuantity, err := resource.ParseQuantity(c.Spec.Volume.Size) if err != nil { return fmt.Errorf("could not parse volume size: %v", err) } - pvs, newSize, err := c.listVolumesWithManifestSize(newVolume) + + pvs, newSize, err := c.listVolumesWithManifestSize(c.Spec.Volume) if err != nil { return fmt.Errorf("could not list persistent volumes: %v", err) } + for _, pv := range pvs { volumeSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) if volumeSize >= newSize { @@ -112,45 +147,45 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu continue } compatible := false - for _, resizer := range resizers { - if !resizer.VolumeBelongsToProvider(pv) { - continue - } - compatible = true - if !resizer.IsConnectedToProvider() { - err := resizer.ConnectToProvider() - if err != nil { - return fmt.Errorf("could not connect to the volume provider: %v", err) - } - defer func() { - if err := resizer.DisconnectFromProvider(); err != nil { - c.logger.Errorf("%v", err) - } - }() - } - awsVolumeID, err := resizer.GetProviderVolumeID(pv) - if err != nil { - return err - } - c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize) - if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil { - return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err) - } - c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name) - podName := getPodNameFromPersistentVolume(pv) - if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil { - return
fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err) - } - c.logger.Debugf("filesystem resize successful on volume %q", pv.Name) - pv.Spec.Capacity[v1.ResourceStorage] = newQuantity - c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name) - if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("could not update persistent volume: %q", err) - } - c.logger.Debugf("successfully updated persistent volume %q", pv.Name) + + if !resizer.VolumeBelongsToProvider(pv) { + continue } + compatible = true + if !resizer.IsConnectedToProvider() { + err := resizer.ConnectToProvider() + if err != nil { + return fmt.Errorf("could not connect to the volume provider: %v", err) + } + defer func() { + if err := resizer.DisconnectFromProvider(); err != nil { + c.logger.Errorf("%v", err) + } + }() + } + awsVolumeID, err := resizer.GetProviderVolumeID(pv) + if err != nil { + return err + } + c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize) + if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil { + return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err) + } + c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name) + podName := getPodNameFromPersistentVolume(pv) + if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil { + return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err) + } + c.logger.Debugf("filesystem resize successful on volume %q", pv.Name) + pv.Spec.Capacity[v1.ResourceStorage] = newQuantity + c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name) + if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("could not update persistent volume: %q", err) + } + c.logger.Debugf("successfully updated persistent volume %q", pv.Name) + if !compatible { - c.logger.Warningf("volume %q is incompatible with all available resizing providers", pv.Name) + c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name) totalIncompatible++ } } @@ -160,6 +195,25 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu return nil } +func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error) { + newSize, err := resource.ParseQuantity(newVolume.Size) + manifestSize := quantityToGigabyte(newSize) + if err != nil { + return false, fmt.Errorf("could not parse volume size from the manifest: %v", err) + } + pvcs, err := c.listPersistentVolumeClaims() + if err != nil { + return false, fmt.Errorf("could not receive persistent volume claims: %v", err) + } + for _, pvc := range pvcs { + currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) + if currentSize != manifestSize { + return true, nil + } + } + return false, nil +} + func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) { vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume) if err != nil { @@ -197,3 +251,61 @@ func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedNam func quantityToGigabyte(q resource.Quantity) int64 { return q.ScaledValue(0) / (1 * constants.Gigabyte) } + +func (c *Cluster) executeEBSMigration() error { + if !c.OpConfig.EnableEBSGp3Migration { + return nil + } + 
c.logger.Infof("starting EBS gp2 to gp3 migration") + + pvs, _, err := c.listVolumesWithManifestSize(c.Spec.Volume) + if err != nil { + return fmt.Errorf("could not list persistent volumes: %v", err) + } + c.logger.Debugf("found %d volumes, size of known volumes %d", len(pvs), len(c.EBSVolumes)) + + volumeIds := []string{} + var volumeID string + for _, pv := range pvs { + volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + if err != nil { + continue + } + + volumeIds = append(volumeIds, volumeID) + } + + if len(volumeIds) == len(c.EBSVolumes) { + hasGp2 := false + for _, v := range c.EBSVolumes { + if v.VolumeType == "gp2" { + hasGp2 = true + } + } + + if !hasGp2 { + c.logger.Infof("no EBS gp2 volumes left to migrate") + return nil + } + } + + awsVolumes, err := c.VolumeResizer.DescribeVolumes(volumeIds) + if nil != err { + return err + } + + for _, volume := range awsVolumes { + if volume.VolumeType == "gp2" && volume.Size < c.OpConfig.EnableEBSGp3MigrationMaxSize { + c.logger.Infof("modifying EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) + err = c.VolumeResizer.ModifyVolume(volume.VolumeID, "gp3", volume.Size, 3000, 125) + if nil != err { + c.logger.Warningf("modifying volume %s failed: %v", volume.VolumeID, err) + } + } else { + c.logger.Debugf("skipping EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) + } + c.EBSVolumes[volume.VolumeID] = volume + } + + return nil +} diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go new file mode 100644 index 000000000..17fb8a4af --- /dev/null +++ b/pkg/cluster/volumes_test.go @@ -0,0 +1,263 @@ +package cluster + +import ( + "fmt" + "testing" + + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/zalando/postgres-operator/mocks" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/volumes" + "k8s.io/client-go/kubernetes/fake" +) + +func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) { + clientSet := fake.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PersistentVolumeClaimsGetter: clientSet.CoreV1(), + PersistentVolumesGetter: clientSet.CoreV1(), + PodsGetter: clientSet.CoreV1(), + }, clientSet +} + +func TestResizeVolumeClaim(t *testing.T) { + testName := "test resizing of persistent volume claims" + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + newVolumeSize := "2Gi" + + storage1Gi, err := resource.ParseQuantity("1Gi") + assert.NoError(t, err) + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "pvc", + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + // define and create PVCs for 1Gi volumes + pvcList := 
CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") + // add another PVC with different cluster name + pvcList.Items = append(pvcList.Items, CreatePVCs(namespace, clusterName+"-2", labels.Set{}, 1, "1Gi").Items[0]) + + for _, pvc := range pvcList.Items { + cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) + } + + // test resizing + cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize}) + + pvcs, err := cluster.listPersistentVolumeClaims() + assert.NoError(t, err) + + // check if listPersistentVolumeClaims returns only the PVCs matching the filter + if len(pvcs) != len(pvcList.Items)-1 { + t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1) + } + + // check if PVCs were correctly resized + for _, pvc := range pvcs { + newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) + expectedQuantity, err := resource.ParseQuantity(newVolumeSize) + assert.NoError(t, err) + expectedSize := quantityToGigabyte(expectedQuantity) + if newStorageSize != expectedSize { + t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize) + } + } + + // check if other PVC was not resized + pvc2, err := cluster.KubeClient.PersistentVolumeClaims(namespace).Get(context.TODO(), constants.DataVolumeName+"-"+clusterName+"-2-0", metav1.GetOptions{}) + assert.NoError(t, err) + unchangedSize := quantityToGigabyte(pvc2.Spec.Resources.Requests[v1.ResourceStorage]) + expectedSize := quantityToGigabyte(storage1Gi) + if unchangedSize != expectedSize { + t.Errorf("%s: volume size changed, got %v, expected %v", testName, unchangedSize, expectedSize) + } +} + +func TestQuantityToGigabyte(t *testing.T) { + tests := []struct { + name string + quantityStr string + expected int64 + }{ + { + "test with 1Gi", + "1Gi", + 1, + }, + { + "test with float", + "1.5Gi", + int64(1), + }, + { + "test with 1000Mi", + "1000Mi", + int64(0), + }, + } + + for _, tt := range tests { + quantity, err := resource.ParseQuantity(tt.quantityStr) + assert.NoError(t, err) + gigabyte := quantityToGigabyte(quantity) + if gigabyte != tt.expected { + t.Errorf("%s: got %v, expected %v", tt.name, gigabyte, tt.expected) + } + } +} + +func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int, size string) v1.PersistentVolumeClaimList { + // define and create PVCs for 1Gi volumes + storage1Gi, _ := resource.ParseQuantity(size) + pvcList := v1.PersistentVolumeClaimList{ + Items: []v1.PersistentVolumeClaim{}, + } + + for i := 0; i < n; i++ { + pvc := v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clusterName, i), + Namespace: namespace, + Labels: labels, + }, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: storage1Gi, + }, + }, + VolumeName: fmt.Sprintf("persistent-volume-%d", i), + }, + } + pvcList.Items = append(pvcList.Items, pvc) + } + + return pvcList +} + +func TestMigrateEBS(t *testing.T) { + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "pvc", + EnableEBSGp3Migration: true, + 
EnableEBSGp3MigrationMaxSize: 1000, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Spec.Volume.Size = "1Gi" + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") + + ps := v1.PersistentVolumeSpec{} + ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} + ps.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-1" + + ps2 := v1.PersistentVolumeSpec{} + ps2.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} + ps2.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-2" + + pvList := &v1.PersistentVolumeList{ + Items: []v1.PersistentVolume{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "persistent-volume-0", + }, + Spec: ps, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "persistent-volume-1", + }, + Spec: ps2, + }, + }, + } + + for _, pvc := range pvcList.Items { + cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) + } + + for _, pv := range pvList.Items { + cluster.KubeClient.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{}) + } + + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-0", + Labels: filterLabels, + }, + Spec: v1.PodSpec{}, + } + + cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + + pod = v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName + "-1", + Labels: filterLabels, + }, + Spec: v1.PodSpec{}, + } + + cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resizer := mocks.NewMockVolumeResizer(ctrl) + + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( + []volumes.VolumeProperties{ + {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100}, + {VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 100}}, nil) + + // expect only gp2 volume to be modified + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq("gp3"), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + cluster.VolumeResizer = resizer + cluster.executeEBSMigration() +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 26b6b1b87..0c29275e6 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -1,10 +1,14 @@ package controller import ( + "bytes" "context" + "encoding/json" "fmt" "os" + "strings" "sync" + "time" "github.com/sirupsen/logrus" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -12,6 +16,7 @@ import ( "github.com/zalando/postgres-operator/pkg/cluster" acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -25,12 +30,14 @@ import ( typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" 
+ "k8s.io/client-go/tools/reference" ) // Controller represents operator controller type Controller struct { - config spec.ControllerConfig - opConfig *config.Config + config spec.ControllerConfig + opConfig *config.Config + pgTeamMap teams.PostgresTeamMap logger *logrus.Entry KubeClient k8sutil.KubernetesClient @@ -51,10 +58,11 @@ type Controller struct { clusterHistory map[spec.NamespacedName]ringlog.RingLogger // history of the cluster changes teamClusters map[string][]spec.NamespacedName - postgresqlInformer cache.SharedIndexInformer - podInformer cache.SharedIndexInformer - nodesInformer cache.SharedIndexInformer - podCh chan cluster.PodEvent + postgresqlInformer cache.SharedIndexInformer + postgresTeamInformer cache.SharedIndexInformer + podInformer cache.SharedIndexInformer + nodesInformer cache.SharedIndexInformer + podCh chan cluster.PodEvent clusterEventQueues []*cache.FIFO // [workerID]Queue lastClusterSyncTime int64 @@ -69,6 +77,13 @@ type Controller struct { // NewController creates a new controller func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller { logger := logrus.New() + if controllerConfig.EnableJsonLogging { + logger.SetFormatter(&logrus.JSONFormatter{}) + } else { + if os.Getenv("LOG_NOQUOTE") != "" { + logger.SetFormatter(&logrus.TextFormatter{PadLevelText: true, DisableQuote: true}) + } + } var myComponentName = "postgres-operator" if controllerId != "" { @@ -76,7 +91,10 @@ func NewController(controllerConfig *spec.ControllerConfig, controllerId string) } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(logger.Infof) + + // disabling the sending of events also to the logoutput + // the operator currently duplicates a lot of log entries with this setup + // eventBroadcaster.StartLogging(logger.Infof) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: myComponentName}) c := &Controller{ @@ -185,10 +203,18 @@ func (c *Controller) warnOnDeprecatedOperatorParameters() { } } +func compactValue(v string) string { + var compact bytes.Buffer + if err := json.Compact(&compact, []byte(v)); err != nil { + panic("Hard coded json strings broken!") + } + return compact.String() +} + func (c *Controller) initPodServiceAccount() { if c.opConfig.PodServiceAccountDefinition == "" { - c.opConfig.PodServiceAccountDefinition = ` + stringValue := ` { "apiVersion": "v1", "kind": "ServiceAccount", @@ -196,6 +222,9 @@ func (c *Controller) initPodServiceAccount() { "name": "postgres-pod" } }` + + c.opConfig.PodServiceAccountDefinition = compactValue(stringValue) + } // re-uses k8s internal parsing. See k8s client-go issue #193 for explanation @@ -225,7 +254,7 @@ func (c *Controller) initRoleBinding() { // operator binds it to the cluster role with sufficient privileges // we assume the role is created by the k8s administrator if c.opConfig.PodServiceAccountRoleBindingDefinition == "" { - c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(` + stringValue := fmt.Sprintf(` { "apiVersion": "rbac.authorization.k8s.io/v1", "kind": "RoleBinding", @@ -244,6 +273,7 @@ func (c *Controller) initRoleBinding() { } ] }`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name) + c.opConfig.PodServiceAccountRoleBindingDefinition = compactValue(stringValue) } c.logger.Info("Parse role bindings") // re-uses k8s internal parsing. 
See k8s client-go issue #193 for explanation @@ -262,7 +292,14 @@ } - // actual roles bindings are deployed at the time of Postgres/Spilo cluster creation + // actual roles bindings are deployed at the time of Postgres/Spilo cluster creation +} + +func logMultiLineConfig(log *logrus.Entry, config string) { + lines := strings.Split(config, "\n") + for _, l := range lines { + log.Infof("%s", l) + } } func (c *Controller) initController() { @@ -290,16 +327,22 @@ func (c *Controller) initController() { c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err) } - c.initPodServiceAccount() c.initSharedInformers() + if c.opConfig.EnablePostgresTeamCRD { + c.loadPostgresTeams() + } else { + c.pgTeamMap = teams.PostgresTeamMap{} + } + if c.opConfig.DebugLogging { c.logger.Logger.Level = logrus.DebugLevel } - c.logger.Infof("config: %s", c.opConfig.MustMarshal()) + logMultiLineConfig(c.logger, c.opConfig.MustMarshal()) - if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil { + roleDefs := c.getInfrastructureRoleDefinitions() + if infraRoles, err := c.getInfrastructureRoles(roleDefs); err != nil { c.logger.Warningf("could not get infrastructure roles: %v", err) } else { c.config.InfrastructureRoles = infraRoles @@ -323,6 +366,7 @@ func (c *Controller) initController() { func (c *Controller) initSharedInformers() { + // Postgresqls c.postgresqlInformer = acidv1informer.NewPostgresqlInformer( c.KubeClient.AcidV1ClientSet, c.opConfig.WatchedNamespace, @@ -335,6 +379,20 @@ func (c *Controller) initSharedInformers() { DeleteFunc: c.postgresqlDelete, }) + // PostgresTeams + if c.opConfig.EnablePostgresTeamCRD { + c.postgresTeamInformer = acidv1informer.NewPostgresTeamInformer( + c.KubeClient.AcidV1ClientSet, + c.opConfig.WatchedNamespace, + constants.QueueResyncPeriodTPR*6, // 30 min + cache.Indexers{}) + + c.postgresTeamInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.postgresTeamAdd, + UpdateFunc: c.postgresTeamUpdate, + }) + } + // Pods podLw := &cache.ListWatch{ ListFunc: c.podListFunc, @@ -395,6 +453,10 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) + if c.opConfig.EnablePostgresTeamCRD { + go c.runPostgresTeamInformer(stopCh, wg) + } + c.logger.Info("started working in background") } @@ -410,6 +472,12 @@ func (c *Controller) runPostgresqlInformer(stopCh <-chan struct{}, wg *sync.Wait c.postgresqlInformer.Run(stopCh) } +func (c *Controller) runPostgresTeamInformer(stopCh <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + c.postgresTeamInformer.Run(stopCh) +} + func queueClusterKey(eventType EventType, uid types.UID) string { return fmt.Sprintf("%s-%s", eventType, uid) } @@ -442,6 +510,47 @@ func (c *Controller) getEffectiveNamespace(namespaceFromEnvironment, namespaceFr return namespace } +// GetReference of Postgres CR object +// i.e.
required to emit events to this resource +func (c *Controller) GetReference(postgresql *acidv1.Postgresql) *v1.ObjectReference { + ref, err := reference.GetReference(scheme.Scheme, postgresql) + if err != nil { + c.logger.Errorf("could not get reference for Postgresql CR %v/%v: %v", postgresql.Namespace, postgresql.Name, err) + } + return ref +} + +func (c *Controller) meetsClusterDeleteAnnotations(postgresql *acidv1.Postgresql) error { + + deleteAnnotationDateKey := c.opConfig.DeleteAnnotationDateKey + currentTime := time.Now() + currentDate := currentTime.Format("2006-01-02") // go's reference date + + if deleteAnnotationDateKey != "" { + if deleteDate, ok := postgresql.Annotations[deleteAnnotationDateKey]; ok { + if deleteDate != currentDate { + return fmt.Errorf("annotation %s not matching the current date: got %s, expected %s", deleteAnnotationDateKey, deleteDate, currentDate) + } + } else { + return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationDateKey) + } + } + + deleteAnnotationNameKey := c.opConfig.DeleteAnnotationNameKey + + if deleteAnnotationNameKey != "" { + if clusterName, ok := postgresql.Annotations[deleteAnnotationNameKey]; ok { + if clusterName != postgresql.Name { + return fmt.Errorf("annotation %s not matching the cluster name: got %s, expected %s", deleteAnnotationNameKey, clusterName, postgresql.Name) + } + } else { + return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationNameKey) + } + } + + return nil +} + // hasOwnership returns true if the controller is the "owner" of the postgresql. // Whether it's owner is determined by the value of 'acid.zalan.do/controller' // annotation. If the value matches the controllerID then it owns it, or if the diff --git a/pkg/controller/node.go b/pkg/controller/node.go index be41b79ab..2836b4f7f 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -42,7 +42,7 @@ func (c *Controller) nodeAdd(obj interface{}) { return } - c.logger.Debugf("new node has been added: %q (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) + c.logger.Debugf("new node has been added: %s (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) // check if the node became not ready while the operator was down (otherwise we would have caught it in nodeUpdate) if !c.nodeIsReady(node) { @@ -76,7 +76,7 @@ func (c *Controller) nodeUpdate(prev, cur interface{}) { } func (c *Controller) nodeIsReady(node *v1.Node) bool { - return (!node.Spec.Unschedulable || util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel) || + return (!node.Spec.Unschedulable || (len(c.opConfig.NodeReadinessLabel) > 0 && util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel)) || util.MapContains(node.Labels, map[string]string{"master": "true"})) } diff --git a/pkg/controller/node_test.go b/pkg/controller/node_test.go index 28e178bfb..a9616e256 100644 --- a/pkg/controller/node_test.go +++ b/pkg/controller/node_test.go @@ -15,7 +15,6 @@ const ( func newNodeTestController() *Controller { var controller = NewController(&spec.ControllerConfig{}, "node-test") - controller.opConfig.NodeReadinessLabel = map[string]string{readyLabel: readyValue} return controller } @@ -36,29 +35,60 @@ var nodeTestController = newNodeTestController() func TestNodeIsReady(t *testing.T) { testName := "TestNodeIsReady" var testTable = []struct { - in *v1.Node - out bool + in *v1.Node + out bool + readinessLabel map[string]string }{ { - in: makeNode(map[string]string{"foo": "bar"}, true), - 
out: true, + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar"}, false), - out: false, + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{readyLabel: readyValue}, false), - out: true, + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), - out: true, + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{}, }, } for _, tt := range testTable { + nodeTestController.opConfig.NodeReadinessLabel = tt.readinessLabel if isReady := nodeTestController.nodeIsReady(tt.in); isReady != tt.out { - t.Errorf("%s: expected response %t doesn't match the actual %t for the node %#v", + t.Errorf("%s: expected response %t does not match the actual %t for the node %#v", testName, tt.out, isReady, tt.in) } } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 41d701fe2..a55dab2d8 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -15,7 +15,7 @@ import ( func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) { - config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get( + config, err := c.KubeClient.OperatorConfigurationsGetter.OperatorConfigurations(configObjectNamespace).Get( context.TODO(), configObjectName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err) @@ -35,14 +35,16 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur // general config result.EnableCRDValidation = util.CoalesceBool(fromCRD.EnableCRDValidation, util.True()) result.EnableLazySpiloUpgrade = fromCRD.EnableLazySpiloUpgrade + result.EnablePgVersionEnvVar = fromCRD.EnablePgVersionEnvVar + result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat result.EtcdHost = fromCRD.EtcdHost result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps - result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-12:1.6-p3") - result.Workers = fromCRD.Workers + result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p2") + result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) result.MinInstances = fromCRD.MinInstances result.MaxInstances = fromCRD.MaxInstances - result.ResyncPeriod = 
time.Duration(fromCRD.ResyncPeriod) - result.RepairPeriod = time.Duration(fromCRD.RepairPeriod) + result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m") + result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m") result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit result.ShmVolume = util.CoalesceBool(fromCRD.ShmVolume, util.True()) result.SidecarImages = fromCRD.SidecarImages @@ -58,27 +60,49 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap - result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod) + result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret + result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m") result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged + result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser + result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local") result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True()) + result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc") result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True()) result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True()) result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName + result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName + if fromCRD.Kubernetes.InfrastructureRolesDefs != nil { + result.InfrastructureRoles = []*config.InfrastructureRole{} + for _, secret := range fromCRD.Kubernetes.InfrastructureRolesDefs { + result.InfrastructureRoles = append( + result.InfrastructureRoles, + &config.InfrastructureRole{ + SecretName: secret.SecretName, + UserKey: secret.UserKey, + RoleKey: secret.RoleKey, + PasswordKey: secret.PasswordKey, + }) + } + } + result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role") - result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels + result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"}) result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels + result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name") + result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey + result.DeleteAnnotationNameKey = fromCRD.Kubernetes.DeleteAnnotationNameKey result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready") - 
result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout) + result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m") result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity result.PodAntiAffinityTopologyKey = util.Coalesce(fromCRD.Kubernetes.PodAntiAffinityTopologyKey, "kubernetes.io/hostname") @@ -91,38 +115,45 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.MinMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.MinMemoryLimit, "250Mi") // timeout config - result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval) - result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout) - result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout) - result.PodDeletionWaitTimeout = time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout) - result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval) - result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout) + result.ResourceCheckInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ResourceCheckInterval), "3s") + result.ResourceCheckTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ResourceCheckTimeout), "10m") + result.PodLabelWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout), "10m") + result.PodDeletionWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout), "10m") + result.ReadyWaitInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitInterval), "4s") + result.ReadyWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitTimeout), "30s") // load balancer config - result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone + result.DbHostedZone = util.Coalesce(fromCRD.LoadBalancer.DbHostedZone, "db.example.com") result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat + result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster") // AWS or GCP config result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket result.AWSRegion = fromCRD.AWSGCP.AWSRegion result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole + result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket + result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount - result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath + result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials") + result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration + result.EnableEBSGp3MigrationMaxSize = fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize // logical backup config result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *") result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup") + result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3") result.LogicalBackupS3Bucket = 
fromCRD.LogicalBackup.S3Bucket result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey result.LogicalBackupS3SSE = fromCRD.LogicalBackup.S3SSE + result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials // debug config result.DebugLogging = fromCRD.OperatorDebug.DebugLogging @@ -130,20 +161,22 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur // Teams API config result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI - result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl - result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration + result.TeamsAPIUrl = util.Coalesce(fromCRD.TeamsAPI.TeamsAPIUrl, "https://teams.example.com/api/") + result.TeamAPIRoleConfiguration = util.CoalesceStrMap(fromCRD.TeamsAPI.TeamAPIRoleConfiguration, map[string]string{"log_statement": "all"}) result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser result.EnableAdminRoleForUsers = fromCRD.TeamsAPI.EnableAdminRoleForUsers result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole - result.PamRoleName = fromCRD.TeamsAPI.PamRoleName - result.PamConfiguration = fromCRD.TeamsAPI.PamConfiguration - result.ProtectedRoles = fromCRD.TeamsAPI.ProtectedRoles + result.PamRoleName = util.Coalesce(fromCRD.TeamsAPI.PamRoleName, "zalandos") + result.PamConfiguration = util.Coalesce(fromCRD.TeamsAPI.PamConfiguration, "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees") + result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin"}) result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams + result.EnablePostgresTeamCRD = fromCRD.TeamsAPI.EnablePostgresTeamCRD + result.EnablePostgresTeamCRDSuperusers = fromCRD.TeamsAPI.EnablePostgresTeamCRDSuperusers // logging REST API config - result.APIPort = fromCRD.LoggingRESTAPI.APIPort - result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines - result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries + result.APIPort = util.CoalesceInt(fromCRD.LoggingRESTAPI.APIPort, 8080) + result.RingLogLines = util.CoalesceInt(fromCRD.LoggingRESTAPI.RingLogLines, 100) + result.ClusterHistoryEntries = util.CoalesceInt(fromCRD.LoggingRESTAPI.ClusterHistoryEntries, 1000) // Scalyr config result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index c243f330f..0fe0c1120 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -2,6 +2,7 @@ package controller import ( "context" + "encoding/json" "fmt" "reflect" "strings" @@ -45,7 +46,7 @@ func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.Postgresq var pgList acidv1.PostgresqlList // TODO: use the SharedInformer cache instead of querying Kubernetes API directly.
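A recurring pattern in the importConfigurationFromCRD changes above is replacing raw assignments with util.Coalesce* helpers, so that unset CRD fields fall back to documented defaults instead of Go zero values. A hedged sketch of the pattern with simplified, self-contained helpers (the operator's real ones live in pkg/util and may differ in detail):

```go
package main

import (
	"fmt"
	"time"
)

// coalesce falls back to defaultVal when val is unset.
func coalesce(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}

// coalesceDuration parses the default only when val is zero; the defaults
// are hard-coded, so a parse failure would be a programming error.
func coalesceDuration(val time.Duration, defaultVal string) time.Duration {
	if val == 0 {
		d, err := time.ParseDuration(defaultVal)
		if err != nil {
			panic(err)
		}
		return d
	}
	return val
}

func main() {
	fmt.Println(coalesce("", "pvc"))                    // "pvc", as for storage_resize_mode
	fmt.Println(coalesceDuration(0, "30m"))             // 30m0s, as for resync_period
	fmt.Println(coalesceDuration(5*time.Second, "30m")) // 5s, an explicit value wins
}
```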
- list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options) + list, err := c.KubeClient.PostgresqlsGetter.Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options) if err != nil { c.logger.Errorf("could not list postgresql objects: %v", err) } @@ -224,11 +225,11 @@ func (c *Controller) processEvent(event ClusterEvent) { switch event.EventType { case EventAdd: if clusterFound { - lg.Debugf("cluster already exists") + lg.Infof("Received add event for already existing Postgres cluster") return } - lg.Infof("creation of the cluster started") + lg.Infof("creating a new Postgres cluster") cl = c.addCluster(lg, clusterName, event.NewSpec) @@ -420,15 +421,42 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. clusterError = informerNewSpec.Error } + // only allow deletion if delete annotations are set and conditions are met + if eventType == EventDelete { + if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil { + c.logger.WithField("cluster-name", clusterName).Warnf( + "ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err) + c.logger.WithField("cluster-name", clusterName).Warnf( + "please recreate the Postgresql resource %q and set the delete annotations properly", clusterName) + if currentManifest, marshalErr := json.Marshal(informerOldSpec); marshalErr != nil { + c.logger.WithField("cluster-name", clusterName).Warnf("could not marshal current manifest:\n%+v", informerOldSpec) + } else { + c.logger.WithField("cluster-name", clusterName).Warnf("%s\n", string(currentManifest)) + } + return + } + } + if clusterError != "" && eventType != EventDelete { - c.logger. - WithField("cluster-name", clusterName). - Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) + c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) + + switch eventType { + case EventAdd: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError) + case EventUpdate: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError) + default: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError) + } + + return } // Don't pass the spec directly from the informer, since subsequent modifications of it would be reflected - // in the informer internal state, making it incohherent with the actual Kubernetes object (and, as a side + // in the informer internal state, making it incoherent with the actual Kubernetes object (and, as a side // effect, the modified state will be returned together with subsequent events). workerID := c.clusterWorkerID(clusterName) @@ -445,7 +473,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
if err := c.clusterEventQueues[workerID].Add(clusterEvent); err != nil { lg.Errorf("error while queueing cluster event: %v", clusterEvent) } - lg.Infof("%q event has been queued", eventType) + lg.Infof("%s event has been queued", eventType) if eventType != EventDelete { return @@ -466,7 +494,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. if err != nil { lg.Warningf("could not delete event from the queue: %v", err) } else { - lg.Debugf("event %q has been discarded for the cluster", evType) + lg.Debugf("event %s has been discarded for the cluster", evType) } } } diff --git a/pkg/controller/postgresql_test.go b/pkg/controller/postgresql_test.go index b36519c5a..71d23a264 100644 --- a/pkg/controller/postgresql_test.go +++ b/pkg/controller/postgresql_test.go @@ -1,8 +1,10 @@ package controller import ( + "fmt" "reflect" "testing" + "time" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" @@ -90,3 +92,88 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { } } } + +func TestMeetsClusterDeleteAnnotations(t *testing.T) { + // set delete annotations in configuration + postgresqlTestController.opConfig.DeleteAnnotationDateKey = "delete-date" + postgresqlTestController.opConfig.DeleteAnnotationNameKey = "delete-clustername" + + currentTime := time.Now() + today := currentTime.Format("2006-01-02") // go's reference date + clusterName := "acid-test-cluster" + + tests := []struct { + name string + pg *acidv1.Postgresql + error string + }{ + { + "Postgres cluster with matching delete annotations", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + "delete-clustername": clusterName, + }, + }, + }, + "", + }, + { + "Postgres cluster with violated delete date annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": "2020-02-02", + "delete-clustername": clusterName, + }, + }, + }, + fmt.Sprintf("annotation delete-date not matching the current date: got 2020-02-02, expected %s", today), + }, + { + "Postgres cluster with violated delete cluster name annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + "delete-clustername": "acid-minimal-cluster", + }, + }, + }, + fmt.Sprintf("annotation delete-clustername not matching the cluster name: got acid-minimal-cluster, expected %s", clusterName), + }, + { + "Postgres cluster with missing delete annotations", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{}, + }, + }, + "annotation delete-date not set in manifest to allow cluster deletion", + }, + { + "Postgres cluster with missing delete cluster name annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + }, + }, + }, + "annotation delete-clustername not set in manifest to allow cluster deletion", + }, + } + for _, tt := range tests { + if err := postgresqlTestController.meetsClusterDeleteAnnotations(tt.pg); err != nil { + if !reflect.DeepEqual(err.Error(), tt.error) { + t.Errorf("Expected error %q, got: %v", tt.error, err) + } + } + } +} diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 511f02823..815bc7b74 100644 --- a/pkg/controller/util.go +++ 
b/pkg/controller/util.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" "fmt" + "strings" v1 "k8s.io/api/core/v1" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -14,6 +15,8 @@ import ( acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/cluster" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/teams" + "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "gopkg.in/yaml.v2" @@ -28,6 +31,7 @@ func (c *Controller) makeClusterConfig() cluster.Config { return cluster.Config{ RestConfig: c.config.RestConfig, OpConfig: config.Copy(c.opConfig), + PgTeamMap: c.pgTeamMap, InfrastructureRoles: infrastructureRoles, PodServiceAccount: c.PodServiceAccount, } @@ -50,7 +54,7 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 { return c.clusterWorkers[clusterName] } -func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error { +func (c *Controller) createOperatorCRD(crd *apiextv1.CustomResourceDefinition) error { if _, err := c.KubeClient.CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil { if k8sutil.ResourceAlreadyExists(err) { c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name) @@ -78,12 +82,12 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti for _, cond := range c.Status.Conditions { switch cond.Type { - case apiextv1beta1.Established: - if cond.Status == apiextv1beta1.ConditionTrue { + case apiextv1.Established: + if cond.Status == apiextv1.ConditionTrue { return true, err } - case apiextv1beta1.NamesAccepted: - if cond.Status == apiextv1beta1.ConditionFalse { + case apiextv1.NamesAccepted: + if cond.Status == apiextv1.ConditionFalse { return false, fmt.Errorf("name conflict: %v", cond.Reason) } } @@ -109,8 +113,163 @@ func readDecodedRole(s string) (*spec.PgUser, error) { return &result, nil } -func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (map[string]spec.PgUser, error) { - if *rolesSecret == (spec.NamespacedName{}) { +var emptyName = (spec.NamespacedName{}) + +// Return information about which secrets we need to use to create +// infrastructure roles and in which format they are. This is done in a +// backward-compatible way, so that the previous logic is not changed, and it +// handles configuration in both the ConfigMap and the CRD. +func (c *Controller) getInfrastructureRoleDefinitions() []*config.InfrastructureRole { + var roleDef config.InfrastructureRole + + // take from CRD configuration + rolesDefs := c.opConfig.InfrastructureRoles + + // check if we can extract something from the configmap config option + if c.opConfig.InfrastructureRolesDefs != "" { + // The configmap option can contain a role description (in the + // form key1: value1, key2: value2) that has to be used together with + // an old secret name.
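For illustration, a standalone sketch of how such a property string breaks apart; the secret name in the example is hypothetical, and the parser that follows additionally decodes the secret name into a NamespacedName and validates the assembled definition:

```go
package main

import (
	"fmt"
	"strings"
)

// parseRoleDef splits a comma-separated list of "key: value" properties
// into a map, trimming whitespace and skipping malformed entries, the same
// way the loop below does before mapping keys onto InfrastructureRole fields.
func parseRoleDef(def string) map[string]string {
	result := map[string]string{}
	for _, property := range strings.Split(def, ",") {
		values := strings.Split(property, ":")
		if len(values) < 2 {
			continue
		}
		result[strings.TrimSpace(values[0])] = strings.TrimSpace(values[1])
	}
	return result
}

func main() {
	def := "secretname: postgresql-infrastructure-roles, userkey: user, passwordkey: password"
	fmt.Println(parseRoleDef(def))
	// map[passwordkey:password secretname:postgresql-infrastructure-roles userkey:user]
}
```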
+ + var secretName spec.NamespacedName + var err error + propertySep := "," + valueSep := ":" + + // The field contains the format in which the secret is written; let's + // convert it to a proper definition + properties := strings.Split(c.opConfig.InfrastructureRolesDefs, propertySep) + roleDef = config.InfrastructureRole{Template: false} + + for _, property := range properties { + values := strings.Split(property, valueSep) + if len(values) < 2 { + continue + } + name := strings.TrimSpace(values[0]) + value := strings.TrimSpace(values[1]) + + switch name { + case "secretname": + if err = secretName.DecodeWorker(value, "default"); err != nil { + c.logger.Warningf("Could not marshal secret name %s: %v", value, err) + } else { + roleDef.SecretName = secretName + } + case "userkey": + roleDef.UserKey = value + case "passwordkey": + roleDef.PasswordKey = value + case "rolekey": + roleDef.RoleKey = value + case "defaultuservalue": + roleDef.DefaultUserValue = value + case "defaultrolevalue": + roleDef.DefaultRoleValue = value + default: + c.logger.Warningf("Role description is not known: %s", properties) + } + } + + if roleDef.SecretName != emptyName && + (roleDef.UserKey != "" || roleDef.DefaultUserValue != "") && + roleDef.PasswordKey != "" { + rolesDefs = append(rolesDefs, &roleDef) + } + } + + if c.opConfig.InfrastructureRolesSecretName != emptyName { + // At this point we deal with the old format, let's replicate it + // via existing definition structure and remember that it's just a + // template, the real values are in user1,password1,inrole1 etc. + rolesDefs = append(rolesDefs, &config.InfrastructureRole{ + SecretName: c.opConfig.InfrastructureRolesSecretName, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }) + } + + return rolesDefs +} + +func (c *Controller) getInfrastructureRoles( + rolesSecrets []*config.InfrastructureRole) ( + map[string]spec.PgUser, []error) { + + var errors []error + var noRolesProvided = true + + roles := []spec.PgUser{} + uniqRoles := map[string]spec.PgUser{} + + // To be compatible with the legacy implementation we need to return nil if + // the provided secret name is empty. The equivalent situation in the + // current implementation is an empty rolesSecrets slice or all its items + // are empty. + for _, role := range rolesSecrets { + if role.SecretName != emptyName { + noRolesProvided = false + } + } + + if noRolesProvided { + return nil, nil + } + + for _, secret := range rolesSecrets { + infraRoles, err := c.getInfrastructureRole(secret) + + if err != nil || infraRoles == nil { + c.logger.Debugf("Cannot get infrastructure role: %+v", *secret) + + if err != nil { + errors = append(errors, err) + } + + continue + } + + for _, r := range infraRoles { + roles = append(roles, r) + } + } + + for _, r := range roles { + if _, exists := uniqRoles[r.Name]; exists { + msg := "Conflicting infrastructure roles: roles[%s] = (%q, %q)" + c.logger.Debugf(msg, r.Name, uniqRoles[r.Name], r) + } + + uniqRoles[r.Name] = r + } + + return uniqRoles, errors +} + +// Generate a list of users representing one infrastructure role based on its +// description in various K8S objects. An infrastructure role could be +// described by a secret and optionally a config map. The former should contain +// the secret information, i.e. username, password, role. The latter could +// contain an extensive description of the role and even override +// information obtained from the secret (except a password).
+ +// +// This function returns a list of users to be compatible with the previous +// behaviour, since we don't know how many users are actually encoded in the +// secret if it's a "template" role. If the provided role is not a template +// one, the result would be a list with just one user in it. +// +// FIXME: This dependency on two different objects is rather unnecessarily +// complicated, so let's get rid of it via a deprecation process. +func (c *Controller) getInfrastructureRole( + infraRole *config.InfrastructureRole) ( + []spec.PgUser, error) { + + rolesSecret := infraRole.SecretName + roles := []spec.PgUser{} + + if rolesSecret == emptyName { // we don't have infrastructure roles defined, bail out return nil, nil } @@ -119,52 +278,97 @@ func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (m Secrets(rolesSecret.Namespace). Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{}) if err != nil { - c.logger.Debugf("infrastructure roles secret name: %q", *rolesSecret) - return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err) + msg := "could not get infrastructure roles secret %s/%s: %v" + return nil, fmt.Errorf(msg, rolesSecret.Namespace, rolesSecret.Name, err) } secretData := infraRolesSecret.Data - result := make(map[string]spec.PgUser) -Users: - // in worst case we would have one line per user - for i := 1; i <= len(secretData); i++ { - properties := []string{"user", "password", "inrole"} - t := spec.PgUser{Origin: spec.RoleOriginInfrastructure} - for _, p := range properties { - key := fmt.Sprintf("%s%d", p, i) - if val, present := secretData[key]; !present { - if p == "user" { - // exit when the user name with the next sequence id is absent - break Users - } - } else { - s := string(val) - switch p { - case "user": - t.Name = s - case "password": - t.Password = s - case "inrole": - t.MemberOf = append(t.MemberOf, s) - default: - c.logger.Warningf("unknown key %q", p) - } + + if infraRole.Template { + Users: + for i := 1; i <= len(secretData); i++ { + properties := []string{ + infraRole.UserKey, + infraRole.PasswordKey, + infraRole.RoleKey, } - delete(secretData, key) + t := spec.PgUser{Origin: spec.RoleOriginInfrastructure} + for _, p := range properties { + key := fmt.Sprintf("%s%d", p, i) + if val, present := secretData[key]; !present { + if p == infraRole.UserKey { + // exit when the user name with the next sequence id is + // absent + break Users + } + } else { + s := string(val) + switch p { + case infraRole.UserKey: + t.Name = s + case infraRole.PasswordKey: + t.Password = s + case infraRole.RoleKey: + t.MemberOf = append(t.MemberOf, s) + default: + c.logger.Warningf("unknown key %q", p) + } + } + // XXX: This is a part of the original implementation, which is + // rather obscure. Why do we delete this key? Wouldn't it be + // used later when comparing against the configmap?
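+ // (The deletion marks the key as consumed; the legacy code relied
+ // on the leftover entries to warn about unprocessed data.)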
+ delete(secretData, key) + } + + if t.Valid() { + roles = append(roles, t) + } else { + msg := "infrastructure role %q is not complete and ignored" + c.logger.Warningf(msg, t) + } + } + } else { + roleDescr := &spec.PgUser{Origin: spec.RoleOriginInfrastructure} + + if details, exists := secretData[infraRole.Details]; exists { + if err := yaml.Unmarshal(details, &roleDescr); err != nil { + return nil, fmt.Errorf("could not decode yaml role: %v", err) + } + } else { + roleDescr.Name = util.Coalesce(string(secretData[infraRole.UserKey]), infraRole.DefaultUserValue) + roleDescr.Password = string(secretData[infraRole.PasswordKey]) + roleDescr.MemberOf = append(roleDescr.MemberOf, + util.Coalesce(string(secretData[infraRole.RoleKey]), infraRole.DefaultRoleValue)) } - if t.Name != "" { - if t.Password == "" { - c.logger.Warningf("infrastructure role %q has no password defined and is ignored", t.Name) - continue - } - result[t.Name] = t + if !roleDescr.Valid() { + msg := "infrastructure role %q is not complete and ignored" + c.logger.Warningf(msg, roleDescr) + + return nil, nil } + + roles = append(roles, *roleDescr) } - // perhaps we have some map entries with usernames, passwords, let's check if we have those users in the configmap - if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get( - context.TODO(), rolesSecret.Name, metav1.GetOptions{}); err == nil { + // Now for the plot twist: we need to check if there is a configmap with + // the same name and extract a role description from it if one exists. + infraRolesMap, err := c.KubeClient. + ConfigMaps(rolesSecret.Namespace). + Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{}) + if err == nil { // we have a configmap with username - json description, let's read and decode it for role, s := range infraRolesMap.Data { roleDescr, err := readDecodedRole(s) @@ -182,20 +386,43 @@ Users: } roleDescr.Name = role roleDescr.Origin = spec.RoleOriginInfrastructure - result[role] = *roleDescr + roles = append(roles, *roleDescr) } } - if len(secretData) > 0 { - c.logger.Warningf("%d unprocessed entries in the infrastructure roles secret,"+ - " checking configmap %v", len(secretData), rolesSecret.Name) - c.logger.Info(`infrastructure role entries should be in the {key}{id} format,` + - ` where {key} can be either of "user", "password", "inrole" and the {id}` + - ` a monotonically increasing integer starting with 1`) - c.logger.Debugf("unprocessed entries: %#v", secretData) + // TODO: check for role collisions + return roles, nil +} + +func (c *Controller) loadPostgresTeams() { + // reset team map + c.pgTeamMap = teams.PostgresTeamMap{} + + pgTeams, err := c.KubeClient.PostgresTeamsGetter.PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + c.logger.Errorf("could not list postgres team objects: %v", err) + return + } + + c.pgTeamMap.Load(pgTeams) + c.logger.Debugf("Internal Postgres Team Cache: %#v", c.pgTeamMap) +} + +func (c *Controller) postgresTeamAdd(obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + return + } + c.logger.Debugf("PostgresTeam %q added. Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() +} + +func (c *Controller) postgresTeamUpdate(prev, obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + return + } + c.logger.Debugf("PostgresTeam %q updated. Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() } func (c *Controller) podClusterName(pod *v1.Pod) spec.NamespacedName { diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index ef182248e..edc05d67e 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -8,20 +8,25 @@ import ( b64 "encoding/base64" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( - testInfrastructureRolesSecretName = "infrastructureroles-test" + testInfrastructureRolesOldSecretName = "infrastructureroles-old-test" + testInfrastructureRolesNewSecretName = "infrastructureroles-new-test" ) func newUtilTestController() *Controller { controller := NewController(&spec.ControllerConfig{}, "util-test") controller.opConfig.ClusterNameLabel = "cluster-name" controller.opConfig.InfrastructureRolesSecretName = - spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName} + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + } controller.opConfig.Workers = 4 controller.KubeClient = k8sutil.NewMockKubernetesClient() return controller @@ -80,24 +85,32 @@ func TestClusterWorkerID(t *testing.T) { } } -func TestGetInfrastructureRoles(t *testing.T) { +// Test functionality of getting infrastructure roles from their description in +// corresponding secrets. Here we test only common cases (e.g. when a secret +// does not exist or is empty) and the old format.
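+//
+// In the old format the secret holds numbered triples, one per role, e.g.
+// (a made-up sample matching the mock secret):
+//
+//   user1: testrole
+//   password1: testpassword
+//   inrole1: testinrole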
+func TestOldInfrastructureRoleFormat(t *testing.T) { var testTable = []struct { - secretName spec.NamespacedName - expectedRoles map[string]spec.PgUser - expectedError error + secretName spec.NamespacedName + expectedRoles map[string]spec.PgUser + expectedErrors []error }{ { + // empty secret name spec.NamespacedName{}, nil, nil, }, { + // secret does not exist spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: "null"}, - nil, - fmt.Errorf(`could not get infrastructure roles secret: NotFound`), + map[string]spec.PgUser{}, + []error{fmt.Errorf(`could not get infrastructure roles secret default/null: NotFound`)}, }, { - spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, map[string]spec.PgUser{ "testrole": { Name: "testrole", @@ -116,15 +129,354 @@ func TestGetInfrastructureRoles(t *testing.T) { }, } for _, test := range testTable { - roles, err := utilTestController.getInfrastructureRoles(&test.secretName) - if err != test.expectedError { - if err != nil && test.expectedError != nil && err.Error() == test.expectedError.Error() { - continue - } - t.Errorf("expected error '%v' does not match the actual error '%v'", test.expectedError, err) + roles, errors := utilTestController.getInfrastructureRoles( + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: test.secretName, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }) + + if len(errors) != len(test.expectedErrors) { + t.Errorf("expected error '%v' does not match the actual error '%v'", + test.expectedErrors, errors) } + + for idx := range errors { + err := errors[idx] + expectedErr := test.expectedErrors[idx] + + if err != expectedErr { + if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() { + continue + } + t.Errorf("expected error '%v' does not match the actual error '%v'", + expectedErr, err) + } + } + if !reflect.DeepEqual(roles, test.expectedRoles) { - t.Errorf("expected roles output %v does not match the actual %v", test.expectedRoles, roles) + t.Errorf("expected roles output %#v does not match the actual %#v", + test.expectedRoles, roles) + } + } +} + +// Test functionality of getting infrastructure roles from their description in +// corresponding secrets. Here we test the new format. 
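+//
+// In the new format each secret describes a single role under fixed keys,
+// e.g. (a made-up sample matching the mock secrets):
+//
+//   user: new-test-role
+//   password: new-test-password
+//   inrole: new-test-inrole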
+func TestNewInfrastructureRoleFormat(t *testing.T) { + var testTable = []struct { + secrets []spec.NamespacedName + expectedRoles map[string]spec.PgUser + expectedErrors []error + }{ + // one secret with one configmap + { + []spec.NamespacedName{ + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + }, + map[string]spec.PgUser{ + "new-test-role": { + Name: "new-test-role", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password", + MemberOf: []string{"new-test-inrole"}, + }, + "new-foobar": { + Name: "new-foobar", + Origin: spec.RoleOriginInfrastructure, + Password: b64.StdEncoding.EncodeToString([]byte("password")), + MemberOf: nil, + Flags: []string{"createdb"}, + }, + }, + nil, + }, + // multiple standalone secrets + { + []spec.NamespacedName{ + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: "infrastructureroles-new-test1", + }, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: "infrastructureroles-new-test2", + }, + }, + map[string]spec.PgUser{ + "new-test-role1": { + Name: "new-test-role1", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password1", + MemberOf: []string{"new-test-inrole1"}, + }, + "new-test-role2": { + Name: "new-test-role2", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password2", + MemberOf: []string{"new-test-inrole2"}, + }, + }, + nil, + }, + } + for _, test := range testTable { + definitions := []*config.InfrastructureRole{} + for _, secret := range test.secrets { + definitions = append(definitions, &config.InfrastructureRole{ + SecretName: secret, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: false, + }) + } + + roles, errors := utilTestController.getInfrastructureRoles(definitions) + if len(errors) != len(test.expectedErrors) { + t.Errorf("expected errors do not match the actual errors:\n%+v\n%+v", + test.expectedErrors, errors) + + // Stop and do not do any further checks + return + } + + for idx := range errors { + err := errors[idx] + expectedErr := test.expectedErrors[idx] + + if err != expectedErr { + if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() { + continue + } + t.Errorf("expected error '%v' does not match the actual error '%v'", + expectedErr, err) + } + } + + if !reflect.DeepEqual(roles, test.expectedRoles) { + t.Errorf("expected roles output does not match the actual:\n%#v\n%#v", + test.expectedRoles, roles) + } + } +} + +// Tests for getting correct infrastructure roles definitions from the present +// configuration, e.g. in which secrets to look for which roles. The biggest +// point here is compatibility of the old and new formats of defining +// infrastructure roles.
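+//
+// Roughly, definitions are collected in this order: roles from the CRD
+// configuration come first, then a role parsed from the
+// infrastructure_roles_secrets option, and finally the legacy
+// infrastructure_roles_secret_name secret as a template role.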
+func TestInfrastructureRoleDefinitions(t *testing.T) { + var testTable = []struct { + rolesDefs []*config.InfrastructureRole + roleSecretName spec.NamespacedName + roleSecrets string + expectedDefs []*config.InfrastructureRole + }{ + // only new CRD format + { + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + spec.NamespacedName{}, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + }, + // only new configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + }, + // new configmap format with defaultRoleValue + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + DefaultRoleValue: "test-role", + Template: false, + }, + }, + }, + // only old CRD and configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // both formats for CRD + { + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // both formats for configmap + { + []*config.InfrastructureRole{}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + 
"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // incorrect configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "wrong-format", + []*config.InfrastructureRole{}, + }, + // configmap without a secret + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{}, + }, + } + + for _, test := range testTable { + t.Logf("Test: %+v", test) + utilTestController.opConfig.InfrastructureRoles = test.rolesDefs + utilTestController.opConfig.InfrastructureRolesSecretName = test.roleSecretName + utilTestController.opConfig.InfrastructureRolesDefs = test.roleSecrets + + defs := utilTestController.getInfrastructureRoleDefinitions() + if len(defs) != len(test.expectedDefs) { + t.Errorf("expected definitions does not match the actual:\n%#v\n%#v", + test.expectedDefs, defs) + + // Stop and do not do any further checks + return + } + + for idx := range defs { + def := defs[idx] + expectedDef := test.expectedDefs[idx] + + if !reflect.DeepEqual(def, expectedDef) { + t.Errorf("expected definition/the actual:\n%#v\n%#v", + expectedDef, def) + } } } } diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 5363e8cc4..c5a24f7da 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -35,7 +35,7 @@ import ( var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) + var localSchemeBuilder = runtime.SchemeBuilder{ acidv1.AddToScheme, } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go index 1879b9514..e48e2d2a7 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -33,6 +33,7 @@ import ( type AcidV1Interface interface { RESTClient() rest.Interface OperatorConfigurationsGetter + PostgresTeamsGetter PostgresqlsGetter } @@ -45,6 +46,10 @@ func (c *AcidV1Client) OperatorConfigurations(namespace string) OperatorConfigur return newOperatorConfigurations(c, namespace) } +func (c *AcidV1Client) PostgresTeams(namespace string) PostgresTeamInterface { + return newPostgresTeams(c, namespace) +} + func (c *AcidV1Client) Postgresqls(namespace string) PostgresqlInterface { return newPostgresqls(c, namespace) } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go index 8cd4dc9da..9e31f5192 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go +++ 
b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -38,6 +38,10 @@ func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigu return &FakeOperatorConfigurations{c, namespace} } +func (c *FakeAcidV1) PostgresTeams(namespace string) v1.PostgresTeamInterface { + return &FakePostgresTeams{c, namespace} +} + func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface { return &FakePostgresqls{c, namespace} } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go new file mode 100644 index 000000000..20c8ec809 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go @@ -0,0 +1,136 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePostgresTeams implements PostgresTeamInterface +type FakePostgresTeams struct { + Fake *FakeAcidV1 + ns string +} + +var postgresteamsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresteams"} + +var postgresteamsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "PostgresTeam"} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *FakePostgresTeams) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *FakePostgresTeams) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresTeamList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(postgresteamsResource, postgresteamsKind, c.ns, opts), &acidzalandov1.PostgresTeamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &acidzalandov1.PostgresTeamList{ListMeta: obj.(*acidzalandov1.PostgresTeamList).ListMeta} + for _, item := range obj.(*acidzalandov1.PostgresTeamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *FakePostgresTeams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(postgresteamsResource, c.ns, opts)) + +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Create(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.CreateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Update(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.UpdateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *FakePostgresTeams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePostgresTeams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(postgresteamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresTeamList{}) + return err +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *FakePostgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(postgresteamsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go index fd5707c75..bcd80f922 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -26,4 +26,6 @@ package v1 type OperatorConfigurationExpansion interface{} +type PostgresTeamExpansion interface{} + type PostgresqlExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..82157dceb --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PostgresTeamsGetter has a method to return a PostgresTeamInterface. +// A group's client should implement this interface. +type PostgresTeamsGetter interface { + PostgresTeams(namespace string) PostgresTeamInterface +} + +// PostgresTeamInterface has methods to work with PostgresTeam resources. 
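+// A minimal, hypothetical call through the generated clientset:
+//
+//   team, err := client.AcidV1().PostgresTeams("default").Get(ctx, "teamAB", metav1.GetOptions{})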
+type PostgresTeamInterface interface { + Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (*v1.PostgresTeam, error) + Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (*v1.PostgresTeam, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PostgresTeam, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresTeamList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) + PostgresTeamExpansion +} + +// postgresTeams implements PostgresTeamInterface +type postgresTeams struct { + client rest.Interface + ns string +} + +// newPostgresTeams returns a PostgresTeams +func newPostgresTeams(c *AcidV1Client, namespace string) *postgresTeams { + return &postgresTeams{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *postgresTeams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *postgresTeams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresTeamList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PostgresTeamList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *postgresTeams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *postgresTeams) Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Post(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. 
+func (c *postgresTeams) Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Put(). + Namespace(c.ns). + Resource("postgresteams"). + Name(postgresTeam.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *postgresTeams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *postgresTeams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *postgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go index 30090afee..b83d4d0f0 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -30,6 +30,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // PostgresTeams returns a PostgresTeamInformer. + PostgresTeams() PostgresTeamInformer // Postgresqls returns a PostgresqlInformer. Postgresqls() PostgresqlInformer } @@ -45,6 +47,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// PostgresTeams returns a PostgresTeamInformer. +func (v *version) PostgresTeams() PostgresTeamInformer { + return &postgresTeamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Postgresqls returns a PostgresqlInformer. 
func (v *version) Postgresqls() PostgresqlInformer { return &postgresqlInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..7ae532cbd --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PostgresTeamInformer provides access to a shared informer and lister for +// PostgresTeams. +type PostgresTeamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PostgresTeamLister +} + +type postgresTeamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
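+//
+// A hypothetical wiring through the shared informer factory:
+//
+//   factory := externalversions.NewSharedInformerFactory(clientset, 30*time.Second)
+//   informer := factory.Acid().V1().PostgresTeams().Informer()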
+func NewFilteredPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).Watch(context.TODO(), options) + }, + }, + &acidzalandov1.PostgresTeam{}, + resyncPeriod, + indexers, + ) +} + +func (f *postgresTeamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *postgresTeamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acidzalandov1.PostgresTeam{}, f.defaultInformer) +} + +func (f *postgresTeamInformer) Lister() v1.PostgresTeamLister { + return v1.NewPostgresTeamLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 562dec419..7dff3e4e5 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -59,6 +59,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=acid.zalan.do, Version=v1 + case v1.SchemeGroupVersion.WithResource("postgresteams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().PostgresTeams().Informer()}, nil case v1.SchemeGroupVersion.WithResource("postgresqls"): return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().Postgresqls().Informer()}, nil diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go index 1b96a7c76..81e829926 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -24,6 +24,14 @@ SOFTWARE. package v1 +// PostgresTeamListerExpansion allows custom methods to be added to +// PostgresTeamLister. +type PostgresTeamListerExpansion interface{} + +// PostgresTeamNamespaceListerExpansion allows custom methods to be added to +// PostgresTeamNamespaceLister. +type PostgresTeamNamespaceListerExpansion interface{} + // PostgresqlListerExpansion allows custom methods to be added to // PostgresqlLister. type PostgresqlListerExpansion interface{} diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go index 9a60c8281..ee3efbdfe 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -32,8 +32,10 @@ import ( ) // PostgresqlLister helps list Postgresqls. +// All objects returned here must be treated as read-only. type PostgresqlLister interface { // List lists all Postgresqls in the indexer. 
+ // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Postgresqls returns an object that can list and get Postgresqls. Postgresqls(namespace string) PostgresqlNamespaceLister @@ -64,10 +66,13 @@ func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceList } // PostgresqlNamespaceLister helps list and get Postgresqls. +// All objects returned here must be treated as read-only. type PostgresqlNamespaceLister interface { // List lists all Postgresqls in the indexer for a given namespace. + // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Get retrieves the Postgresql from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. Get(name string) (*v1.Postgresql, error) PostgresqlNamespaceListerExpansion } diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..102dae832 --- /dev/null +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PostgresTeamLister helps list PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamLister interface { + // List lists all PostgresTeams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // PostgresTeams returns an object that can list and get PostgresTeams. + PostgresTeams(namespace string) PostgresTeamNamespaceLister + PostgresTeamListerExpansion +} + +// postgresTeamLister implements the PostgresTeamLister interface. +type postgresTeamLister struct { + indexer cache.Indexer +} + +// NewPostgresTeamLister returns a new PostgresTeamLister. +func NewPostgresTeamLister(indexer cache.Indexer) PostgresTeamLister { + return &postgresTeamLister{indexer: indexer} +} + +// List lists all PostgresTeams in the indexer. 
+func (s *postgresTeamLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// PostgresTeams returns an object that can list and get PostgresTeams. +func (s *postgresTeamLister) PostgresTeams(namespace string) PostgresTeamNamespaceLister { + return postgresTeamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PostgresTeamNamespaceLister helps list and get PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamNamespaceLister interface { + // List lists all PostgresTeams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // Get retrieves the PostgresTeam from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PostgresTeam, error) + PostgresTeamNamespaceListerExpansion +} + +// postgresTeamNamespaceLister implements the PostgresTeamNamespaceLister +// interface. +type postgresTeamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PostgresTeams in the indexer for a given namespace. +func (s postgresTeamNamespaceLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// Get retrieves the PostgresTeam from the indexer for a given namespace and name. +func (s postgresTeamNamespaceLister) Get(name string) (*v1.PostgresTeam, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("postgresteam"), name) + } + return obj.(*v1.PostgresTeam), nil +} diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 08008267b..78c79e1b3 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -55,6 +55,10 @@ type PgUser struct { AdminRole string `yaml:"admin_role"` } +func (user *PgUser) Valid() bool { + return user.Name != "" && user.Password != "" +} + // PgUserMap maps user names to the definitions. 
type PgUserMap map[string]PgUser @@ -110,6 +114,8 @@ type ControllerConfig struct { CRDReadyWaitTimeout time.Duration ConfigMapName NamespacedName Namespace string + + EnableJsonLogging bool } // cached value for the GetOperatorNamespace diff --git a/pkg/teams/postgres_team.go b/pkg/teams/postgres_team.go new file mode 100644 index 000000000..7fb725765 --- /dev/null +++ b/pkg/teams/postgres_team.go @@ -0,0 +1,118 @@ +package teams + +import ( + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +type PostgresTeamMap map[string]postgresTeamMembership + +type postgresTeamMembership struct { + AdditionalSuperuserTeams []string + AdditionalTeams []string + AdditionalMembers []string +} + +type teamHashSet map[string]map[string]struct{} + +func (ths *teamHashSet) has(team string) bool { + _, ok := (*ths)[team] + return ok +} + +func (ths *teamHashSet) add(newTeam string, newSet []string) { + set := make(map[string]struct{}) + if ths.has(newTeam) { + set = (*ths)[newTeam] + } + for _, t := range newSet { + set[t] = struct{}{} + } + (*ths)[newTeam] = set +} + +func (ths *teamHashSet) toMap() map[string][]string { + newTeamMap := make(map[string][]string) + for team, items := range *ths { + list := []string{} + for item := range items { + list = append(list, item) + } + newTeamMap[team] = list + } + return newTeamMap +} + +func (ths *teamHashSet) mergeCrdMap(crdTeamMap map[string][]string) { + for t, at := range crdTeamMap { + ths.add(t, at) + } +} + +func fetchTeams(teamset *map[string]struct{}, set teamHashSet) { + for key := range set { + (*teamset)[key] = struct{}{} + } +} + +func (ptm *PostgresTeamMap) fetchAdditionalTeams(team string, superuserTeams bool, transitive bool, exclude []string) []string { + + var teams []string + + if superuserTeams { + teams = (*ptm)[team].AdditionalSuperuserTeams + } else { + teams = (*ptm)[team].AdditionalTeams + } + if transitive { + exclude = append(exclude, team) + for _, additionalTeam := range teams { + if !(util.SliceContains(exclude, additionalTeam)) { + transitiveTeams := (*ptm).fetchAdditionalTeams(additionalTeam, superuserTeams, transitive, exclude) + for _, transitiveTeam := range transitiveTeams { + if !(util.SliceContains(exclude, transitiveTeam)) && !(util.SliceContains(teams, transitiveTeam)) { + teams = append(teams, transitiveTeam) + } + } + } + } + } + + return teams +} + +// GetAdditionalTeams returns the list of additional teams for the given team +func (ptm *PostgresTeamMap) GetAdditionalTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, false, transitive, []string{}) +} + +// GetAdditionalSuperuserTeams returns the list of additional superuser teams for the given team +func (ptm *PostgresTeamMap) GetAdditionalSuperuserTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, true, transitive, []string{}) +} + +// Load imports the data from the PostgresTeam CRDs +func (ptm *PostgresTeamMap) Load(pgTeams *acidv1.PostgresTeamList) { + superuserTeamSet := teamHashSet{} + teamSet := teamHashSet{} + teamMemberSet := teamHashSet{} + teamIDs := make(map[string]struct{}) + + for _, pgTeam := range pgTeams.Items { + superuserTeamSet.mergeCrdMap(pgTeam.Spec.AdditionalSuperuserTeams) + teamSet.mergeCrdMap(pgTeam.Spec.AdditionalTeams) + teamMemberSet.mergeCrdMap(pgTeam.Spec.AdditionalMembers) + } + fetchTeams(&teamIDs, superuserTeamSet) +
fetchTeams(&teamIDs, teamSet) + fetchTeams(&teamIDs, teamMemberSet) + + for teamID := range teamIDs { + (*ptm)[teamID] = postgresTeamMembership{ + AdditionalSuperuserTeams: util.CoalesceStrArr(superuserTeamSet.toMap()[teamID], []string{}), + AdditionalTeams: util.CoalesceStrArr(teamSet.toMap()[teamID], []string{}), + AdditionalMembers: util.CoalesceStrArr(teamMemberSet.toMap()[teamID], []string{}), + } + } +} diff --git a/pkg/teams/postgres_team_test.go b/pkg/teams/postgres_team_test.go new file mode 100644 index 000000000..f8c3a21d8 --- /dev/null +++ b/pkg/teams/postgres_team_test.go @@ -0,0 +1,194 @@ +package teams + +import ( + "testing" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + True = true + False = false + pgTeamList = acidv1.PostgresTeamList{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: []acidv1.PostgresTeam{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamAB", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamA": []string{"teamB", "team24x7"}, "teamB": []string{"teamA", "teamC", "team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamB": []string{}}, + AdditionalMembers: map[string][]string{"team24x7": []string{"optimusprime"}, "teamB": []string{"drno"}}, + }, + }, { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamC", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamC": []string{"team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamC": []string{"teamA", "teamB", "acid"}}, + AdditionalMembers: map[string][]string{"acid": []string{"batman"}}, + }, + }, + }, + } +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +func TestLoadingPostgresTeamCRD(t *testing.T) { + tests := []struct { + name string + crd acidv1.PostgresTeamList + ptm PostgresTeamMap + error string + }{ + { + "Check that CRD is imported correctly into the internal format", + pgTeamList, + PostgresTeamMap{ + "teamA": { + AdditionalSuperuserTeams: []string{"teamB", "team24x7"}, + AdditionalTeams: []string{"teamC"}, + AdditionalMembers: []string{}, + }, + "teamB": { + AdditionalSuperuserTeams: []string{"teamA", "teamC", "team24x7"}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"drno"}, + }, + "teamC": { + AdditionalSuperuserTeams: []string{"team24x7"}, + AdditionalTeams: []string{"teamA", "teamB", "acid"}, + AdditionalMembers: []string{}, + }, + "team24x7": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"optimusprime"}, + }, + "acid": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"batman"}, + }, + }, + "Mismatch between PostgresTeam CRD and internal map", + }, + } + + for _, tt := range tests { + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&tt.crd) + for team, ptmeamMembership := range postgresTeamMap { + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalSuperuserTeams, tt.ptm[team].AdditionalSuperuserTeams) { + t.Errorf("%s: %v: expected additional members %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if 
!util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalTeams, tt.ptm[team].AdditionalTeams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalMembers, tt.ptm[team].AdditionalMembers) { + t.Errorf("%s: %v: expected additional superuser teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + } + } +} + +// TestGetAdditionalTeams if returns teams with and without transitive dependencies +func TestGetAdditionalTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional teams are returned", + "teamA", + false, + []string{"teamC"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that additional teams are returned incl. transitive teams", + "teamA", + true, + []string{"teamC", "teamB", "acid"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that empty list is returned", + "teamB", + false, + []string{}, + "GetAdditionalTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} + +// TestGetAdditionalSuperuserTeams if returns teams with and without transitive dependencies +func TestGetAdditionalSuperuserTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional superuser teams are returned", + "teamA", + false, + []string{"teamB", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that additional superuser teams are returned incl. 
transitive superuser teams", + "teamA", + true, + []string{"teamB", "teamC", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that empty list is returned", + "team24x7", + false, + []string{}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalSuperuserTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 348452193..62cfdec57 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -28,14 +28,19 @@ type Resources struct { PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` + SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"` + SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` SpiloFSGroup *int64 `name:"spilo_fsgroup"` PodPriorityClassName string `name:"pod_priority_class_name"` ClusterDomain string `name:"cluster_domain" default:"cluster.local"` SpiloPrivileged bool `name:"spilo_privileged" default:"false"` ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` InheritedLabels []string `name:"inherited_labels" default:""` + InheritedAnnotations []string `name:"inherited_annotations" default:""` DownscalerAnnotations []string `name:"downscaler_annotations"` ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` + DeleteAnnotationDateKey string `name:"delete_annotation_date_key"` + DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` PodToleration map[string]string `name:"toleration" default:""` DefaultCPURequest string `name:"default_cpu_request" default:"100m"` @@ -45,22 +50,52 @@ type Resources struct { MinCPULimit string `name:"min_cpu_limit" default:"250m"` MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"` PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"` + PodEnvironmentSecret string `name:"pod_environment_secret"` NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` MaxInstances int32 `name:"max_instances" default:"-1"` MinInstances int32 `name:"min_instances" default:"-1"` ShmVolume *bool `name:"enable_shm_volume" default:"true"` } +type InfrastructureRole struct { + // Name of a secret which describes the role, and optionally name of a + // configmap with an extra information + SecretName spec.NamespacedName + + UserKey string + PasswordKey string + RoleKey string + + DefaultUserValue string + DefaultRoleValue string + + // This field point out the detailed yaml definition of the role, if exists + Details string + + // Specify if a secret contains multiple fields in the following format: + // + // %(userkey)idx: ... + // %(passwordkey)idx: ... + // %(rolekey)idx: ... + // + // If it does, Name/Password/Role are interpreted not as unique field + // names, but as a template. 
+ + Template bool +} + // Auth describes authentication specific configuration parameters type Auth struct { - SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` - PamRoleName string `name:"pam_role_name" default:"zalandos"` - PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` - TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` - OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"` - InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"` - SuperUsername string `name:"super_username" default:"postgres"` - ReplicationUsername string `name:"replication_username" default:"standby"` + SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` + PamRoleName string `name:"pam_role_name" default:"zalandos"` + PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` + TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` + OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"` + InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"` + InfrastructureRoles []*InfrastructureRole `name:"-"` + InfrastructureRolesDefs string `name:"infrastructure_roles_secrets"` + SuperUsername string `name:"super_username" default:"postgres"` + ReplicationUsername string `name:"replication_username" default:"standby"` } // Scalyr holds the configuration for the Scalyr Agent sidecar for log shipping: @@ -76,14 +111,16 @@ type Scalyr struct { // LogicalBackup defines configuration for logical backup type LogicalBackup struct { - LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` - LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` - LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` - LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` - LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` - LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` - LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` - LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:""` + LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` + LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` + LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` + LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` + LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` + LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` + LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` + LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` + LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:""` + LogicalBackupGoogleApplicationCredentials string `name:"logical_backup_google_application_credentials" 
default:""` } // Operator options for connection pooler @@ -109,14 +146,13 @@ type Config struct { LogicalBackup ConnectionPooler - WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' - KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` - EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"` - // deprecated in favour of SidecarContainers - SidecarImages map[string]string `name:"sidecar_docker_images"` - SidecarContainers []v1.Container `name:"sidecars"` - PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` + WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' + KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` + EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS + DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p2"` + SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers + SidecarContainers []v1.Container `name:"sidecars"` + PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` // value of this string must be valid JSON or YAML; see initPodServiceAccount PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""` @@ -126,44 +162,53 @@ type Config struct { WALES3Bucket string `name:"wal_s3_bucket"` LogS3Bucket string `name:"log_s3_bucket"` KubeIAMRole string `name:"kube_iam_role"` + WALGSBucket string `name:"wal_gs_bucket"` + GCPCredentials string `name:"gcp_credentials"` AdditionalSecretMount string `name:"additional_secret_mount"` AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"` + EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"` + EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"` DebugLogging bool `name:"debug_logging" default:"true"` EnableDBAccess bool `name:"enable_database_access" default:"true"` EnableTeamsAPI bool `name:"enable_teams_api" default:"true"` EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` TeamAdminRole string `name:"team_admin_role" default:"admin"` EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"` + EnablePostgresTeamCRD bool `name:"enable_postgres_team_crd" default:"false"` + EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"` EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` CustomServiceAnnotations map[string]string `name:"custom_service_annotations"` CustomPodAnnotations map[string]string `name:"custom_pod_annotations"` EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"` PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" 
default:"kubernetes.io/hostname"` - // deprecated and kept for backward compatibility - EnableLoadBalancer *bool `name:"enable_load_balancer"` - MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` - ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` - PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` - EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` - EnableInitContainers *bool `name:"enable_init_containers" default:"true"` - EnableSidecars *bool `name:"enable_sidecars" default:"true"` - Workers uint32 `name:"workers" default:"4"` - APIPort int `name:"api_port" default:"8080"` - RingLogLines int `name:"ring_log_lines" default:"100"` - ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"` - TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` - PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` - PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"` - ProtectedRoles []string `name:"protected_role_names" default:"admin"` - PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` - SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` - EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` + StorageResizeMode string `name:"storage_resize_mode" default:"pvc"` + EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility + ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"` + MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` + ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` + PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` + EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` + EnableInitContainers *bool `name:"enable_init_containers" default:"true"` + EnableSidecars *bool `name:"enable_sidecars" default:"true"` + Workers uint32 `name:"workers" default:"8"` + APIPort int `name:"api_port" default:"8080"` + RingLogLines int `name:"ring_log_lines" default:"100"` + ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"` + TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` + PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` + PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"` + ProtectedRoles []string `name:"protected_role_names" default:"admin"` + PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` + SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` + EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` + EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"` + EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"` } // MustMarshal marshals the config or panics func (c Config) MustMarshal() string { - b, err := json.MarshalIndent(c, "", "\t") + b, err := json.MarshalIndent(c, "", " ") if err != nil { panic(err) } diff --git a/pkg/util/constants/pooler.go 
b/pkg/util/constants/pooler.go index 52e47c9cd..ded795bbe 100644 --- a/pkg/util/constants/pooler.go +++ b/pkg/util/constants/pooler.go @@ -14,5 +14,5 @@ const ( ConnectionPoolerContainer = 0 ConnectionPoolerMaxDBConnections = 60 ConnectionPoolerMaxClientConnections = 10000 - ConnectionPoolerMinInstances = 2 + ConnectionPoolerMinInstances = 1 ) diff --git a/pkg/util/constants/roles.go b/pkg/util/constants/roles.go index 87c9c51ce..dd906fe80 100644 --- a/pkg/util/constants/roles.go +++ b/pkg/util/constants/roles.go @@ -2,20 +2,21 @@ package constants // Roles specific constants const ( - PasswordLength = 64 - SuperuserKeyName = "superuser" + PasswordLength = 64 + SuperuserKeyName = "superuser" ConnectionPoolerUserKeyName = "pooler" - ReplicationUserKeyName = "replication" - RoleFlagSuperuser = "SUPERUSER" - RoleFlagInherit = "INHERIT" - RoleFlagLogin = "LOGIN" - RoleFlagNoLogin = "NOLOGIN" - RoleFlagCreateRole = "CREATEROLE" - RoleFlagCreateDB = "CREATEDB" - RoleFlagReplication = "REPLICATION" - RoleFlagByPassRLS = "BYPASSRLS" - OwnerRoleNameSuffix = "_owner" - ReaderRoleNameSuffix = "_reader" - WriterRoleNameSuffix = "_writer" - UserRoleNameSuffix = "_user" + ReplicationUserKeyName = "replication" + RoleFlagSuperuser = "SUPERUSER" + RoleFlagInherit = "INHERIT" + RoleFlagLogin = "LOGIN" + RoleFlagNoLogin = "NOLOGIN" + RoleFlagCreateRole = "CREATEROLE" + RoleFlagCreateDB = "CREATEDB" + RoleFlagReplication = "REPLICATION" + RoleFlagByPassRLS = "BYPASSRLS" + OwnerRoleNameSuffix = "_owner" + ReaderRoleNameSuffix = "_reader" + WriterRoleNameSuffix = "_writer" + UserRoleNameSuffix = "_user" + DefaultSearchPath = "\"$user\"" ) diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index d7be2f48a..dd6ec1e8b 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -6,16 +6,22 @@ import ( "reflect" b64 "encoding/base64" + "encoding/json" batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/spec" apiappsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -24,9 +30,6 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - - acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func Int32ToPointer(value int32) *int32 { @@ -50,8 +53,11 @@ type KubernetesClient struct { appsv1.DeploymentsGetter rbacv1.RoleBindingsGetter policyv1beta1.PodDisruptionBudgetsGetter - apiextbeta1.CustomResourceDefinitionsGetter + apiextv1.CustomResourceDefinitionsGetter clientbatchv1beta1.CronJobsGetter + acidv1.OperatorConfigurationsGetter + 
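// these acid.zalan.do getters are all served by the same generated clientset (AcidV1ClientSet), wired up in NewFromConfig below +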
acidv1.PostgresTeamsGetter + acidv1.PostgresqlsGetter RESTClient rest.Interface AcidV1ClientSet *acidv1client.Clientset @@ -150,17 +156,52 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { return kubeClient, fmt.Errorf("could not create api client:%v", err) } - kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1beta1() + kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1() + kubeClient.AcidV1ClientSet, err = acidv1client.NewForConfig(cfg) + if err != nil { + return kubeClient, fmt.Errorf("could not create acid.zalan.do clientset: %v", err) + } + + kubeClient.OperatorConfigurationsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresTeamsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresqlsGetter = kubeClient.AcidV1ClientSet.AcidV1() return kubeClient, nil } +// SetPostgresCRDStatus sets the status field of a Postgresql CRD via a merge patch +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) { + var pg *apiacidv1.Postgresql + var pgStatus apiacidv1.PostgresStatus + pgStatus.PostgresClusterStatus = status + + patch, err := json.Marshal(struct { + PgStatus interface{} `json:"status"` + }{&pgStatus}) + + if err != nil { + return pg, fmt.Errorf("could not marshal status: %v", err) + } + + // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), + // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11) + // we should take advantage of it. + pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch( + context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status") + if err != nil { + return pg, fmt.Errorf("could not update status: %v", err) + } + + // return the patched object, which carries the new resourceVersion + return pg, nil +} + // SameService compares the Services func SameService(cur, new *v1.Service) (match bool, reason string) { //TODO: improve comparison if cur.Spec.Type != new.Spec.Type { - return false, fmt.Sprintf("new service's type %q doesn't match the current one %q", + return false, fmt.Sprintf("new service's type %q does not match the current one %q", new.Spec.Type, cur.Spec.Type) } @@ -170,13 +211,13 @@ func SameService(cur, new *v1.Service) (match bool, reason string) { /* work around Kubernetes 1.6 serializing [] as nil.
See https://github.com/kubernetes/kubernetes/issues/43203 */ if (len(oldSourceRanges) != 0) || (len(newSourceRanges) != 0) { if !reflect.DeepEqual(oldSourceRanges, newSourceRanges) { - return false, "new service's LoadBalancerSourceRange doesn't match the current one" + return false, "new service's LoadBalancerSourceRange does not match the current one" } } match = true - reasonPrefix := "new service's annotations doesn't match the current one:" + reasonPrefix := "new service's annotations does not match the current one:" for ann := range cur.Annotations { if _, ok := new.Annotations[ann]; !ok { match = false @@ -212,7 +253,7 @@ func SamePDB(cur, new *policybeta1.PodDisruptionBudget) (match bool, reason stri //TODO: improve comparison match = reflect.DeepEqual(new.Spec, cur.Spec) if !match { - reason = "new PDB spec doesn't match the current one" + reason = "new PDB spec does not match the current one" } return @@ -226,14 +267,14 @@ func getJobImage(cronJob *batchv1beta1.CronJob) string { func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason string) { if cur.Spec.Schedule != new.Spec.Schedule { - return false, fmt.Sprintf("new job's schedule %q doesn't match the current one %q", + return false, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule) } newImage := getJobImage(new) curImage := getJobImage(cur) if newImage != curImage { - return false, fmt.Sprintf("new job's image %q doesn't match the current one %q", + return false, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage) } @@ -241,31 +282,73 @@ func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason st } func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { - if name != "infrastructureroles-test" { - return nil, fmt.Errorf("NotFound") - } - secret := &v1.Secret{} - secret.Name = "testcluster" - secret.Data = map[string][]byte{ + oldFormatSecret := &v1.Secret{} + oldFormatSecret.Name = "testcluster" + oldFormatSecret.Data = map[string][]byte{ "user1": []byte("testrole"), "password1": []byte("testpassword"), "inrole1": []byte("testinrole"), "foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), } - return secret, nil + + newFormatSecret := &v1.Secret{} + newFormatSecret.Name = "test-secret-new-format" + newFormatSecret.Data = map[string][]byte{ + "user": []byte("new-test-role"), + "password": []byte("new-test-password"), + "inrole": []byte("new-test-inrole"), + "new-foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), + } + + secrets := map[string]*v1.Secret{ + "infrastructureroles-old-test": oldFormatSecret, + "infrastructureroles-new-test": newFormatSecret, + } + + for idx := 1; idx <= 2; idx++ { + newFormatStandaloneSecret := &v1.Secret{} + newFormatStandaloneSecret.Name = fmt.Sprintf("test-secret-new-format%d", idx) + newFormatStandaloneSecret.Data = map[string][]byte{ + "user": []byte(fmt.Sprintf("new-test-role%d", idx)), + "password": []byte(fmt.Sprintf("new-test-password%d", idx)), + "inrole": []byte(fmt.Sprintf("new-test-inrole%d", idx)), + } + + secrets[fmt.Sprintf("infrastructureroles-new-test%d", idx)] = + newFormatStandaloneSecret + } + + if secret, exists := secrets[name]; exists { + return secret, nil + } + + return nil, fmt.Errorf("NotFound") } func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { - if name != "infrastructureroles-test" { - 
return nil, fmt.Errorf("NotFound") - } - configmap := &v1.ConfigMap{} - configmap.Name = "testcluster" - configmap.Data = map[string]string{ + oldFormatConfigmap := &v1.ConfigMap{} + oldFormatConfigmap.Name = "testcluster" + oldFormatConfigmap.Data = map[string]string{ "foobar": "{}", } - return configmap, nil + + newFormatConfigmap := &v1.ConfigMap{} + newFormatConfigmap.Name = "testcluster" + newFormatConfigmap.Data = map[string]string{ + "new-foobar": "{\"user_flags\": [\"createdb\"]}", + } + + configmaps := map[string]*v1.ConfigMap{ + "infrastructureroles-old-test": oldFormatConfigmap, + "infrastructureroles-new-test": newFormatConfigmap, + } + + if configmap, exists := configmaps[name]; exists { + return configmap, nil + } + + return nil, fmt.Errorf("NotFound") } // Secrets to be mocked diff --git a/pkg/util/k8sutil/k8sutil_test.go b/pkg/util/k8sutil/k8sutil_test.go index 9b4f2eac3..b3e768501 100644 --- a/pkg/util/k8sutil/k8sutil_test.go +++ b/pkg/util/k8sutil/k8sutil_test.go @@ -63,7 +63,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`, + reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`, }, { about: "services differ on lb source ranges", @@ -82,7 +82,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"185.249.56.0/22"}), match: false, - reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + reason: `new service's LoadBalancerSourceRange does not match the current one`, }, { about: "new service doesn't have lb source ranges", @@ -101,7 +101,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{}), match: false, - reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + reason: `new service's LoadBalancerSourceRange does not match the current one`, }, { about: "services differ on DNS annotation", @@ -120,7 +120,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, + reason: `new service's annotations does not match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, }, { about: "services differ on AWS ELB annotation", @@ -139,7 +139,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, + reason: `new service's annotations does not match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, }, { about: "service changes existing annotation", @@ -160,7 +160,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`, + reason: `new service's annotations does not match the current one: 'foo' changed from 'bar' to 'baz'.`, }, { about: "service changes multiple existing 
annotations", @@ -184,7 +184,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one:`, + reason: `new service's annotations does not match the current one:`, }, { about: "service adds a new custom annotation", @@ -204,7 +204,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`, + reason: `new service's annotations does not match the current one: Added 'foo' with value 'bar'.`, }, { about: "service removes a custom annotation", @@ -224,7 +224,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'.`, }, { about: "service removes a custom annotation and adds a new one", @@ -245,7 +245,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, }, { about: "service removes a custom annotation, adds a new one and change another", @@ -269,7 +269,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'.`, }, { about: "service add annotations", @@ -286,7 +286,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one: Added `, + reason: `new service's annotations does not match the current one: Added `, }, } for _, tt := range tests { diff --git a/pkg/util/nicediff/diff.go b/pkg/util/nicediff/diff.go new file mode 100644 index 000000000..e2793f2c7 --- /dev/null +++ b/pkg/util/nicediff/diff.go @@ -0,0 +1,191 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package nicediff + +import ( + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. 
+// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string, skipEqual bool) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + return Render(DiffChunks(aLines, bLines), skipEqual) +} + +// Render renders the slice of chunks into a representation that prefixes +// the lines with '+', '-', or ' ' depending on whether the line was added, +// removed, or equal (respectively). +func Render(chunks []Chunk, skipEqual bool) string { + buf := new(strings.Builder) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + if !skipEqual { + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. 
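+ // (Diagonal k is stored at endpoint[maxPath+k], since k ranges from -maxPath + // to +maxPath; hence the maxPath offset in all the indexing below.)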
+ for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/pkg/util/users/users.go b/pkg/util/users/users.go index 345caa001..5d97336e6 100644 --- a/pkg/util/users/users.go +++ b/pkg/util/users/users.go @@ -28,6 +28,7 @@ const ( // an existing roles of another role membership, nor it removes the already assigned flag // (except for the NOLOGIN). TODO: process other NOflags, i.e. NOSUPERUSER correctly. type DefaultUserSyncStrategy struct { + PasswordEncryption string } // ProduceSyncRequests figures out the types of changes that need to happen with the given users. 
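A minimal sketch of how the new PasswordEncryption field plays together with the Encryptor introduced in pkg/util further down this patch (the wiring shown here is assumed for illustration and is not part of the diff; the identifiers themselves all come from this patch):

    package main

    import (
    	"fmt"

    	"github.com/zalando/postgres-operator/pkg/spec"
    	"github.com/zalando/postgres-operator/pkg/util"
    	"github.com/zalando/postgres-operator/pkg/util/users"
    )

    func main() {
    	// the strategy carries the configured encryption; NewEncryptor picks the
    	// matching hasher and falls back to md5 for unknown values
    	strategy := users.DefaultUserSyncStrategy{PasswordEncryption: "scram-sha-256"}
    	user := spec.PgUser{Name: "test", Password: "password"}
    	fmt.Println(util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(user))
    }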
@@ -45,7 +46,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM } } else { r := spec.PgSyncUserRequest{} - newMD5Password := util.PGUserPassword(newUser) + newMD5Password := util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(newUser) if dbUser.Password != newMD5Password { r.User.Password = newMD5Password @@ -113,14 +114,14 @@ func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(requests []spec.PgSy return nil } -func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) (err error) { + +func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) error { queries := produceAlterRoleSetStmts(user) query := fmt.Sprintf(doBlockStmt, strings.Join(queries, ";")) - if _, err = db.Exec(query); err != nil { - err = fmt.Errorf("dB error: %v, query: %s", err, query) - return + if _, err := db.Exec(query); err != nil { + return fmt.Errorf("dB error: %v, query: %s", err, query) } - return + return nil } func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) error { @@ -140,7 +141,7 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D if user.Password == "" { userPassword = "PASSWORD NULL" } else { - userPassword = fmt.Sprintf(passwordTemplate, util.PGUserPassword(user)) + userPassword = fmt.Sprintf(passwordTemplate, util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(user)) } query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword) @@ -148,6 +149,12 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D return fmt.Errorf("dB error: %v, query: %s", err, query) } + if len(user.Parameters) > 0 { + if err := strategy.alterPgUserSet(user, db); err != nil { + return fmt.Errorf("incomplete setup for user %s: %v", user.Name, err) + } + } + return nil } @@ -155,7 +162,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB var resultStmt []string if user.Password != "" || len(user.Flags) > 0 { - alterStmt := produceAlterStmt(user) + alterStmt := produceAlterStmt(user, strategy.PasswordEncryption) resultStmt = append(resultStmt, alterStmt) } if len(user.MemberOf) > 0 { @@ -174,14 +181,14 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB return nil } -func produceAlterStmt(user spec.PgUser) string { +func produceAlterStmt(user spec.PgUser, encryption string) string { // ALTER ROLE ... LOGIN ENCRYPTED PASSWORD .. 
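+ // (a sketch: the fragment produced here looks like ENCRYPTED PASSWORD 'md5...' + // followed by any role flags; the passwordTemplate constant itself is defined + // earlier in this file and is not shown in this diff)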
result := make([]string, 0) password := user.Password flags := user.Flags if password != "" { - result = append(result, fmt.Sprintf(passwordTemplate, util.PGUserPassword(user))) + result = append(result, fmt.Sprintf(passwordTemplate, util.NewEncryptor(encryption).PGUserPassword(user))) } if len(flags) != 0 { result = append(result, strings.Join(flags, " ")) diff --git a/pkg/util/util.go b/pkg/util/util.go index 5701429aa..20e2915ba 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -1,13 +1,18 @@ package util import ( + "crypto/hmac" "crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords cryptoRand "crypto/rand" + "crypto/sha256" + "encoding/base64" "encoding/hex" "fmt" "math/big" "math/rand" + "reflect" "regexp" + "sort" "strings" "time" @@ -16,10 +21,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/zalando/postgres-operator/pkg/spec" + "golang.org/x/crypto/pbkdf2" ) const ( - md5prefix = "md5" + md5prefix = "md5" + scramsha256prefix = "SCRAM-SHA-256" + saltlength = 16 + iterations = 4096 ) var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") @@ -61,16 +70,62 @@ func NameFromMeta(meta metav1.ObjectMeta) spec.NamespacedName { } } -// PGUserPassword is used to generate md5 password hash for a given user. It does nothing for already hashed passwords. -func PGUserPassword(user spec.PgUser) string { - if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || user.Password == "" { +type Hasher func(user spec.PgUser) string +type Random func(n int) string + +type Encryptor struct { + encrypt Hasher + random Random +} + +func NewEncryptor(encryption string) *Encryptor { + e := Encryptor{random: RandomPassword} + m := map[string]Hasher{ + "md5": e.PGUserPasswordMD5, + "scram-sha-256": e.PGUserPasswordScramSHA256, + } + hasher, ok := m[encryption] + if !ok { + hasher = e.PGUserPasswordMD5 + } + e.encrypt = hasher + return &e +} + +func (e *Encryptor) PGUserPassword(user spec.PgUser) string { + if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || + (len(user.Password) > len(scramsha256prefix) && user.Password[:len(scramsha256prefix)] == scramsha256prefix) || user.Password == "" { // Avoid processing already encrypted or empty passwords return user.Password } + return e.encrypt(user) +} + +func (e *Encryptor) PGUserPasswordMD5(user spec.PgUser) string { s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords. 
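+ // e.g. Name "test" with Password "password" hashes to + // "md587f77988ccb5aa917c93201ba314fcd4" (cf. the pgUsers fixture in util_test.go)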
return md5prefix + hex.EncodeToString(s[:]) } +func (e *Encryptor) PGUserPasswordScramSHA256(user spec.PgUser) string { + salt := []byte(e.random(saltlength)) + key := pbkdf2.Key([]byte(user.Password), salt, iterations, 32, sha256.New) + mac := hmac.New(sha256.New, key) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + mac = hmac.New(sha256.New, key) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + storedKey := sha256.Sum256(clientKey) + pass := fmt.Sprintf("%s$%v:%s$%s:%s", + scramsha256prefix, + iterations, + base64.StdEncoding.EncodeToString(salt), + base64.StdEncoding.EncodeToString(storedKey[:]), + base64.StdEncoding.EncodeToString(serverKey), + ) + return pass +} + // Diff returns diffs between 2 objects func Diff(a, b interface{}) []string { return pretty.Diff(a, b) @@ -81,6 +136,21 @@ func PrettyDiff(a, b interface{}) string { return strings.Join(Diff(a, b), "\n") } +// IsEqualIgnoreOrder compares two string slices, ignoring the order of their elements +func IsEqualIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + aCopy := make([]string, len(a)) + bCopy := make([]string, len(b)) + copy(aCopy, a) + copy(bCopy, b) + sort.Strings(aCopy) + sort.Strings(bCopy) + + return reflect.DeepEqual(aCopy, bCopy) +} + // SubstractStringSlices finds elements in a that are not in b and return them as a result slice. func SubstractStringSlices(a []string, b []string) (result []string, equal bool) { // Slices are assumed to contain unique elements only @@ -123,6 +193,20 @@ func FindNamedStringSubmatch(r *regexp.Regexp, s string) map[string]string { return res } +// SliceContains checks via reflection whether the given slice contains the item; it panics if the first argument is not a slice +func SliceContains(slice interface{}, item interface{}) bool { + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + panic("Invalid data-type") + } + for i := 0; i < s.Len(); i++ { + if s.Index(i).Interface() == item { + return true + } + } + return false +} + // MapContains returns true if and only if haystack contains all the keys from the needle with matching corresponding values func MapContains(haystack, needle map[string]string) bool { if len(haystack) < len(needle) { @@ -147,6 +231,30 @@ func Coalesce(val, defaultVal string) string { return val } +// CoalesceStrArr returns the first argument if it is not null, otherwise the second one. +func CoalesceStrArr(val, defaultVal []string) []string { + if len(val) == 0 { + return defaultVal + } + return val +} + +// CoalesceStrMap returns the first argument if it is not null, otherwise the second one.
+func CoalesceStrMap(val, defaultVal map[string]string) map[string]string { + if len(val) == 0 { + return defaultVal + } + return val +} + +// CoalesceInt works like coalesce but for int +func CoalesceInt(val, defaultVal int) int { + if val == 0 { + return defaultVal + } + return val +} + // CoalesceInt32 works like coalesce but for *int32 func CoalesceInt32(val, defaultVal *int32) *int32 { if val == nil { @@ -155,6 +263,14 @@ func CoalesceInt32(val, defaultVal *int32) *int32 { return val } +// CoalesceUInt32 works like coalesce but for uint32 +func CoalesceUInt32(val, defaultVal uint32) uint32 { + if val == 0 { + return defaultVal + } + return val +} + // CoalesceBool works like coalesce but for *bool func CoalesceBool(val, defaultVal *bool) *bool { if val == nil { @@ -163,6 +279,18 @@ func CoalesceBool(val, defaultVal *bool) *bool { return val } +// CoalesceDuration works like coalesce but for time.Duration +func CoalesceDuration(val time.Duration, defaultVal string) time.Duration { + if val == 0 { + duration, err := time.ParseDuration(defaultVal) + if err != nil { + panic(err) + } + return duration + } + return val +} + // Test if any of the values is nil func testNil(values ...*int32) bool { for _, v := range values { diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 1f86ea1b4..c02d2c075 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -12,20 +12,27 @@ import ( ) var pgUsers = []struct { - in spec.PgUser - out string + in spec.PgUser + outmd5 string + outscramsha256 string }{{spec.PgUser{ Name: "test", Password: "password", Flags: []string{}, MemberOf: []string{}}, - "md587f77988ccb5aa917c93201ba314fcd4"}, + "md587f77988ccb5aa917c93201ba314fcd4", "SCRAM-SHA-256$4096:c2FsdA==$lF4cRm/Jky763CN4HtxdHnjV4Q8AWTNlKvGmEFFU8IQ=:ub8OgRsftnk2ccDMOt7ffHXNcikRkQkq1lh4xaAqrSw="}, {spec.PgUser{ Name: "test", Password: "md592f413f3974bdf3799bb6fecb5f9f2c6", Flags: []string{}, MemberOf: []string{}}, - "md592f413f3974bdf3799bb6fecb5f9f2c6"}} + "md592f413f3974bdf3799bb6fecb5f9f2c6", "md592f413f3974bdf3799bb6fecb5f9f2c6"}, + {spec.PgUser{ + Name: "test", + Password: "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", + Flags: []string{}, + MemberOf: []string{}}, + "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs="}} var prettyDiffTest = []struct { inA interface{} @@ -36,6 +43,17 @@ var prettyDiffTest = []struct { {[]int{1, 2, 3, 4}, []int{1, 2, 3, 4}, ""}, } +var isEqualIgnoreOrderTest = []struct { + inA []string + inB []string + outEqual bool +}{ + {[]string{"a", "b", "c"}, []string{"a", "b", "c"}, true}, + {[]string{"a", "b", "c"}, []string{"a", "c", "b"}, true}, + {[]string{"a", "b"}, []string{"a", "c", "b"}, false}, + {[]string{"a", "b", "c"}, []string{"a", "d", "c"}, false}, +} + var substractTest = []struct { inA []string inB []string @@ -46,6 +64,16 @@ var substractTest = []struct { {[]string{"a", "b", "c", "d"}, []string{"a", "bb", "c", "d"}, []string{"b"}, false}, } +var sliceContaintsTest = []struct { + slice []string + item string + out bool +}{ + {[]string{"a", "b", "c"}, "a", true}, + {[]string{"a", "b", "c"}, "d", false}, + {[]string{}, "d", false}, +} + var mapContaintsTest = []struct { inA map[string]string inB map[string]string @@ -107,9 +135,16 @@ func 
TestNameFromMeta(t *testing.T) { func TestPGUserPassword(t *testing.T) { for _, tt := range pgUsers { - pwd := PGUserPassword(tt.in) - if pwd != tt.out { - t.Errorf("PgUserPassword expected: %q, got: %q", tt.out, pwd) + e := NewEncryptor("md5") + pwd := e.PGUserPassword(tt.in) + if pwd != tt.outmd5 { + t.Errorf("PgUserPassword expected: %q, got: %q", tt.outmd5, pwd) + } + e = NewEncryptor("scram-sha-256") + e.random = func(n int) string { return "salt" } + pwd = e.PGUserPassword(tt.in) + if pwd != tt.outscramsha256 { + t.Errorf("PgUserPassword expected: %q, got: %q", tt.outscramsha256, pwd) } } } @@ -122,6 +157,15 @@ func TestPrettyDiff(t *testing.T) { } } +func TestIsEqualIgnoreOrder(t *testing.T) { + for _, tt := range isEqualIgnoreOrderTest { + actualEqual := IsEqualIgnoreOrder(tt.inA, tt.inB) + if actualEqual != tt.outEqual { + t.Errorf("IsEqualIgnoreOrder expected: %t, got: %t", tt.outEqual, actualEqual) + } + } +} + func TestSubstractSlices(t *testing.T) { for _, tt := range substractTest { actualRes, actualEqual := SubstractStringSlices(tt.inA, tt.inB) @@ -146,6 +190,15 @@ func TestFindNamedStringSubmatch(t *testing.T) { } } +func TestSliceContains(t *testing.T) { + for _, tt := range sliceContaintsTest { + res := SliceContains(tt.slice, tt.item) + if res != tt.out { + t.Errorf("SliceContains expected: %#v, got: %#v", tt.out, res) + } + } +} + func TestMapContains(t *testing.T) { for _, tt := range mapContaintsTest { res := MapContains(tt.inA, tt.inB) @@ -166,3 +219,13 @@ func TestIsSmallerQuantity(t *testing.T) { } } } + +/* +func TestNiceDiff(t *testing.T) { + o := "a\nb\nc\n" + n := "b\nd\n" + d := nicediff.Diff(o, n, true) + t.Log(d) + // t.Errorf("Lets see output") +} +*/ diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index 666436a06..17016fb09 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/retryutil" @@ -20,42 +20,80 @@ type EBSVolumeResizer struct { } // ConnectToProvider connects to AWS. -func (c *EBSVolumeResizer) ConnectToProvider() error { - sess, err := session.NewSession(&aws.Config{Region: aws.String(c.AWSRegion)}) +func (r *EBSVolumeResizer) ConnectToProvider() error { + sess, err := session.NewSession(&aws.Config{Region: aws.String(r.AWSRegion)}) if err != nil { return fmt.Errorf("could not establish AWS session: %v", err) } - c.connection = ec2.New(sess) + r.connection = ec2.New(sess) return nil } // IsConnectedToProvider checks if AWS connection is established. -func (c *EBSVolumeResizer) IsConnectedToProvider() bool { - return c.connection != nil +func (r *EBSVolumeResizer) IsConnectedToProvider() bool { + return r.connection != nil } // VolumeBelongsToProvider checks if the given persistent volume is backed by EBS. 
-func (c *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { +func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner } -// GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes -func (c *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { - volumeID := pv.Spec.AWSElasticBlockStore.VolumeID - if volumeID == "" { - return "", fmt.Errorf("volume id is empty for volume %q", pv.Name) - } +// ExtractVolumeID extracts the EBS volume ID from an aws:// volume URI +func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) { idx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1 if idx == 0 { - return "", fmt.Errorf("malfored EBS volume id %q", volumeID) + return "", fmt.Errorf("malformed EBS volume id %q", volumeID) } return volumeID[idx:], nil } +// GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes +func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { + volumeID := pv.Spec.AWSElasticBlockStore.VolumeID + if volumeID == "" { + return "", fmt.Errorf("got empty volume id for volume %v", pv) + } + + return r.ExtractVolumeID(volumeID) +} + +// DescribeVolumes returns the size, type, iops and throughput properties of the given EBS volumes +func (r *EBSVolumeResizer) DescribeVolumes(volumeIds []string) ([]VolumeProperties, error) { + if !r.IsConnectedToProvider() { + err := r.ConnectToProvider() + if err != nil { + return nil, err + } + } + + volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: aws.StringSlice(volumeIds)}) + if err != nil { + return nil, err + } + + p := []VolumeProperties{} + if volumeOutput.Volumes == nil { + return p, nil + } + + for _, v := range volumeOutput.Volumes { + if *v.VolumeType == "gp3" { + p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType, Iops: *v.Iops, Throughput: *v.Throughput}) + } else if *v.VolumeType == "gp2" { + p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType}) + } else { + return nil, fmt.Errorf("discovered unexpected volume type %s %s", *v.VolumeId, *v.VolumeType) + } + } + + return p, nil +} + // ResizeVolume actually calls AWS API to resize the EBS volume if necessary.
-func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { +func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { /* first check if the volume is already of a requested size */ - volumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) + volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) if err != nil { return fmt.Errorf("could not get information about the volume: %v", err) } @@ -68,7 +106,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { return nil } input := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID} - output, err := c.connection.ModifyVolume(&input) + output, err := r.connection.ModifyVolume(&input) if err != nil { return fmt.Errorf("could not modify persistent volume: %v", err) } @@ -87,7 +125,54 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { in := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}} return retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout, func() (bool, error) { - out, err := c.connection.DescribeVolumesModifications(&in) + out, err := r.connection.DescribeVolumesModifications(&in) + if err != nil { + return false, fmt.Errorf("could not describe volume modification: %v", err) + } + if len(out.VolumesModifications) != 1 { + return false, fmt.Errorf("describe volume modification didn't return one record for volume %q", volumeID) + } + if *out.VolumesModifications[0].VolumeId != volumeID { + return false, fmt.Errorf("non-matching volume id when describing modifications: %q is different from %q", + *out.VolumesModifications[0].VolumeId, volumeID) + } + return *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil + }) +} + +// ModifyVolume Modify EBS volume +func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType string, newSize int64, iops int64, throughput int64) error { + /* first check if the volume is already of a requested size */ + volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) + if err != nil { + return fmt.Errorf("could not get information about the volume: %v", err) + } + vol := volumeOutput.Volumes[0] + if *vol.VolumeId != volumeID { + return fmt.Errorf("describe volume %q returned information about a non-matching volume %q", volumeID, *vol.VolumeId) + } + + input := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID, VolumeType: &newType, Iops: &iops, Throughput: &throughput} + output, err := r.connection.ModifyVolume(&input) + if err != nil { + return fmt.Errorf("could not modify persistent volume: %v", err) + } + + state := *output.VolumeModification.ModificationState + if state == constants.EBSVolumeStateFailed { + return fmt.Errorf("could not modify persistent volume %q: modification state failed", volumeID) + } + if state == "" { + return fmt.Errorf("received empty modification status") + } + if state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted { + return nil + } + // wait until the volume reaches the "optimizing" or "completed" state + in := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}} + return retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout, + func() (bool, error) { + out, err := r.connection.DescribeVolumesModifications(&in) if err 
!= nil { return false, fmt.Errorf("could not describe volume modification: %v", err) } @@ -103,7 +188,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { } // DisconnectFromProvider closes connection to the EC2 instance -func (c *EBSVolumeResizer) DisconnectFromProvider() error { - c.connection = nil +func (r *EBSVolumeResizer) DisconnectFromProvider() error { + r.connection = nil return nil } diff --git a/pkg/util/volumes/volumes.go b/pkg/util/volumes/volumes.go index 94c0fffc8..9b44c0d00 100644 --- a/pkg/util/volumes/volumes.go +++ b/pkg/util/volumes/volumes.go @@ -1,8 +1,17 @@ package volumes -import ( - "k8s.io/api/core/v1" -) +//go:generate mockgen -package mocks -destination=$PWD/mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor + +import v1 "k8s.io/api/core/v1" + +// VolumeProperties describes the ID, type, size, iops and throughput of a provider volume +type VolumeProperties struct { + VolumeID string + VolumeType string + Size int64 + Iops int64 + Throughput int64 +} // VolumeResizer defines the set of methods used to implement provider-specific resizing of persistent volumes. type VolumeResizer interface { @@ -10,6 +19,9 @@ type VolumeResizer interface { IsConnectedToProvider() bool VolumeBelongsToProvider(pv *v1.PersistentVolume) bool GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) + ExtractVolumeID(volumeID string) (string, error) ResizeVolume(providerVolumeID string, newSize int64) error + ModifyVolume(providerVolumeID string, newType string, newSize int64, iops int64, throughput int64) error DisconnectFromProvider() error + DescribeVolumes(providerVolumesID []string) ([]VolumeProperties, error) } diff --git a/ui/app/src/postgresql.tag.pug b/ui/app/src/postgresql.tag.pug index 9edae99d3..c557e4da8 100644 --- a/ui/app/src/postgresql.tag.pug +++ b/ui/app/src/postgresql.tag.pug @@ -74,11 +74,13 @@ postgresql .alert.alert-info(if='{ !progress.requestStatus }') PostgreSQL cluster requested .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed - .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() }) + .alert.alert-success(if='{ progress.requestStatus === "OK" }') Manifest creation successful ({ new Date(progress.createdTimestamp).toLocaleString() }) .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created + .alert.alert-danger(if='{progress.status && progress.status.PostgresClusterStatus == "CreateFailed"}') Cluster creation failed: Check events and cluster name!
diff --git a/ui/app/src/postgresql.tag.pug b/ui/app/src/postgresql.tag.pug
index 9edae99d3..c557e4da8 100644
--- a/ui/app/src/postgresql.tag.pug
+++ b/ui/app/src/postgresql.tag.pug
@@ -74,11 +74,13 @@ postgresql
 
       .alert.alert-info(if='{ !progress.requestStatus }') PostgreSQL cluster requested
       .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
-      .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })
+      .alert.alert-success(if='{ progress.requestStatus === "OK" }') Manifest creation successful ({ new Date(progress.createdTimestamp).toLocaleString() })
 
       .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
       .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created
+      .alert.alert-danger(if='{progress.status && progress.status.PostgresClusterStatus == "CreateFailed"}') Cluster creation failed: Check events and cluster name!
+
       .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
       .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created
@@ -127,6 +129,8 @@ postgresql
           this.progress.pooler = false
           this.progress.postgresql = true
           this.progress.postgresqlManifest = data
+          // copy the status, as it is deleted later when the manifest is prepared for editing
+          this.progress.status = data.status
           this.progress.createdTimestamp = data.metadata.creationTimestamp
           this.progress.poolerEnabled = data.spec.enableConnectionPooler
           this.uid = this.progress.postgresqlManifest.metadata.uid
@@ -203,6 +207,7 @@ postgresql
             delete manifest.metadata.annotations[last_applied]
           }
 
+          delete manifest.metadata.managedFields
           delete manifest.metadata.creationTimestamp
           delete manifest.metadata.deletionGracePeriodSeconds
           delete manifest.metadata.deletionTimestamp
diff --git a/ui/app/src/postgresqls.tag.pug b/ui/app/src/postgresqls.tag.pug
index 250c175ec..38e5fcd9d 100644
--- a/ui/app/src/postgresqls.tag.pug
+++ b/ui/app/src/postgresqls.tag.pug
@@ -63,10 +63,8 @@ postgresqls
           td(style='white-space: pre')
             | { namespace }
           td
-            a(
-              href='/#/status/{ cluster_path(this) }'
-            )
-              | { name }
+            a(href='/#/status/{ cluster_path(this) }') { name }
+            btn.btn-danger(if='{status.PostgresClusterStatus == "CreateFailed"}') Create Failed
           td { nodes }
           td { cpu } / { cpu_limit }
           td { memory } / { memory_limit }
@@ -230,7 +228,7 @@ postgresqls
         )
 
         const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
-          costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
+          costs = Math.max(nodes, opts.config.min_pods) * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
           return costs.toFixed(2)
         }
diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml
index 4161b4fc1..51bb394b3 100644
--- a/ui/manifests/deployment.yaml
+++ b/ui/manifests/deployment.yaml
@@ -20,7 +20,7 @@ spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.5.0-dirty
+          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.0
           ports:
             - containerPort: 8081
               protocol: "TCP"
@@ -68,10 +68,8 @@ spec:
             "cost_core": 0.0575,
             "cost_memory": 0.014375,
             "postgresql_versions": [
+              "13",
               "12",
-              "11",
-              "10",
-              "9.6",
-              "9.5"
+              "11"
             ]
           }
diff --git a/ui/manifests/kustomization.yaml b/ui/manifests/kustomization.yaml
new file mode 100644
index 000000000..5803f854e
--- /dev/null
+++ b/ui/manifests/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- deployment.yaml
+- ingress.yaml
+- service.yaml
+- ui-service-account-rbac.yaml
diff --git a/ui/manifests/service.yaml b/ui/manifests/service.yaml
index 989ec041e..7b820ca92 100644
--- a/ui/manifests/service.yaml
+++ b/ui/manifests/service.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   type: "ClusterIP"
   selector:
-    application: "postgres-operator-ui"
+    name: "postgres-operator-ui"
   ports:
     - port: 80
       protocol: "TCP"
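Two remarks on the UI changes above. The cost estimate in postgresqls.tag.pug now bills at least min_pods pods, so with the default MIN_PODS of 2 (introduced in main.py below) a one-pod test cluster is priced as max(1, 2) = 2 pods. The new kustomization.yaml also lets the UI manifests (together with ingress.yaml and ui-service-account-rbac.yaml, which are unchanged here) be applied as a single unit, e.g. kubectl apply -k ui/manifests/ with any kubectl from v1.14 on; and the Service selector switch from application to name presumably aligns the Service with the labels actually set on the UI pods.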
diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py
index dc2450b9f..5fbb6d24e 100644
--- a/ui/operator_ui/main.py
+++ b/ui/operator_ui/main.py
@@ -87,6 +87,7 @@
 SPILO_S3_BACKUP_PREFIX = getenv('SPILO_S3_BACKUP_PREFIX', 'spilo/')
 SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
 TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
 GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
+MIN_PODS = getenv('MIN_PODS', 2)
 
 # storage pricing, i.e. https://aws.amazon.com/ebs/pricing/
 COST_EBS = float(getenv('COST_EBS', 0.119))  # GB per month
@@ -104,6 +105,8 @@
 USE_AWS_INSTANCE_PROFILE = (
     getenv('USE_AWS_INSTANCE_PROFILE', 'false').lower() != 'false'
 )
 
+AWS_ENDPOINT = getenv('AWS_ENDPOINT')
+
 tokens.configure()
 tokens.manage('read-only')
 tokens.start()
@@ -300,13 +303,14 @@
 DEFAULT_UI_CONFIG = {
     'users_visible': True,
     'databases_visible': True,
     'resources_visible': True,
-    'postgresql_versions': ['9.6', '10', '11'],
+    'postgresql_versions': ['11', '12', '13'],
     'dns_format_string': '{0}.{1}.{2}',
     'pgui_link': '',
     'static_network_whitelist': {},
     'cost_ebs': COST_EBS,
     'cost_core': COST_CORE,
-    'cost_memory': COST_MEMORY
+    'cost_memory': COST_MEMORY,
+    'min_pods': MIN_PODS
 }
@@ -318,6 +322,7 @@ def get_config():
     config['resources_visible'] = RESOURCES_VISIBLE
     config['superuser_team'] = SUPERUSER_TEAM
     config['target_namespace'] = TARGET_NAMESPACE
+    config['min_pods'] = MIN_PODS
 
     config['namespaces'] = (
         [TARGET_NAMESPACE]
@@ -491,6 +496,7 @@ def get_postgresqls():
             'uid': uid,
             'namespaced_name': namespace + '/' + name,
             'full_name': namespace + '/' + name + ('/' + uid if uid else ''),
+            'status': status,
         }
         for cluster in these(
             read_postgresqls(
@@ -504,6 +510,7 @@ def get_postgresqls():
             'items',
         )
         for spec in [cluster.get('spec', {}) if cluster.get('spec', {}) is not None else {"error": "Invalid spec in manifest"}]
+        for status in [cluster.get('status', {})]
         for metadata in [cluster['metadata']]
         for namespace in [metadata['namespace']]
         for name in [metadata['name']]
@@ -618,6 +625,17 @@ def update_postgresql(namespace: str, cluster: str):
         if 'enableConnectionPooler' in o['spec']:
             del o['spec']['enableConnectionPooler']
 
+    if 'enableReplicaConnectionPooler' in postgresql['spec']:
+        cp = postgresql['spec']['enableReplicaConnectionPooler']
+        if not cp:
+            if 'enableReplicaConnectionPooler' in o['spec']:
+                del o['spec']['enableReplicaConnectionPooler']
+        else:
+            spec['enableReplicaConnectionPooler'] = True
+    else:
+        if 'enableReplicaConnectionPooler' in o['spec']:
+            del o['spec']['enableReplicaConnectionPooler']
+
     if 'enableReplicaLoadBalancer' in postgresql['spec']:
         rlb = postgresql['spec']['enableReplicaLoadBalancer']
         if not rlb:
@@ -1055,6 +1073,7 @@ def main(port, secret_key, debug, clusters: list):
     logger.info(f'Tokeninfo URL: {TOKENINFO_URL}')
     logger.info(f'Use AWS instance_profile: {USE_AWS_INSTANCE_PROFILE}')
     logger.info(f'WAL-E S3 endpoint: {WALE_S3_ENDPOINT}')
+    logger.info(f'AWS S3 endpoint: {AWS_ENDPOINT}')
 
     if TARGET_NAMESPACE is None:
         @on_exception(
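main.py above and spiloutils.py below both read the new AWS_ENDPOINT variable, which points the UI's S3 client at a custom S3-compatible endpoint, useful for example when testing locally against a stand-in such as MinIO (something like AWS_ENDPOINT=http://localhost:9000; the exact value depends on your setup). When the variable is unset, getenv returns None, and boto3 treats endpoint_url=None as "use the default AWS endpoint", so existing deployments keep their current behaviour.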
diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py
index ea347a84d..6b1d394a1 100644
--- a/ui/operator_ui/spiloutils.py
+++ b/ui/operator_ui/spiloutils.py
@@ -16,6 +16,8 @@
 logger = getLogger(__name__)
 
 session = Session()
 
+AWS_ENDPOINT = getenv('AWS_ENDPOINT')
+
 OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
 
 COMMON_CLUSTER_LABEL = getenv('COMMON_CLUSTER_LABEL', '{"application":"spilo"}')
@@ -266,7 +268,7 @@ def read_stored_clusters(bucket, prefix, delimiter='/'):
     return [
         prefix['Prefix'].split('/')[-2]
         for prefix in these(
-            client('s3').list_objects(
+            client('s3', endpoint_url=AWS_ENDPOINT).list_objects(
                 Bucket=bucket,
                 Delimiter=delimiter,
                 Prefix=prefix,
@@ -287,7 +289,7 @@ def read_versions(
     return [
         'base' if uid == 'wal' else uid
         for prefix in these(
-            client('s3').list_objects(
+            client('s3', endpoint_url=AWS_ENDPOINT).list_objects(
                 Bucket=bucket,
                 Delimiter=delimiter,
                 Prefix=prefix + pg_cluster + delimiter,
@@ -300,6 +302,7 @@ def read_versions(
         if uid == 'wal' or defaulting(lambda: UUID(uid))
     ]
 
+BACKUP_VERSION_PREFIXES = ['', '9.5/', '9.6/', '10/', '11/', '12/', '13/']
 
 def read_basebackups(
     pg_cluster,
@@ -312,18 +315,24 @@ def read_basebackups(
 ):
     environ['WALE_S3_ENDPOINT'] = s3_endpoint
     suffix = '' if uid == 'base' else '/' + uid
-    return [
-        {
-            key: value
-            for key, value in basebackup.__dict__.items()
-            if isinstance(value, str) or isinstance(value, int)
-        }
-        for basebackup in Attrs.call(
-            f=configure_backup_cxt,
-            aws_instance_profile=use_aws_instance_profile,
-            s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/',
-        )._backup_list(detail=True)._backup_list(prefix=f"{prefix}{pg_cluster}{suffix}/wal/")
-    ]
+    backups = []
+
+    for vp in BACKUP_VERSION_PREFIXES:
+
+        backups = backups + [
+            {
+                key: value
+                for key, value in basebackup.__dict__.items()
+                if isinstance(value, str) or isinstance(value, int)
+            }
+            for basebackup in Attrs.call(
+                f=configure_backup_cxt,
+                aws_instance_profile=use_aws_instance_profile,
+                s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}',
+            )._backup_list(detail=True)
+        ]
+
+    return backups
 
 
 def parse_time(s: str):
diff --git a/ui/run_local.sh b/ui/run_local.sh
index e331b2414..79723680a 100755
--- a/ui/run_local.sh
+++ b/ui/run_local.sh
@@ -23,11 +23,9 @@ default_operator_ui_config='{
     "cost_core": 0.0575,
    "cost_memory": 0.014375,
    "postgresql_versions": [
+        "13",
         "12",
-        "11",
-        "10",
-        "9.6",
-        "9.5"
+        "11"
     ],
    "static_network_whitelist": {
        "localhost": ["172.0.0.1/32"]