diff --git a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md new file mode 100644 index 000000000..a4dec9409 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md @@ -0,0 +1,19 @@ +--- +name: Postgres Operator issue template +about: How are you using the operator? +title: '' +labels: '' +assignees: '' + +--- + +Please answer some short questions which should help us understand your problem / question better. + +- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.0 +- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s] +- **Are you running Postgres Operator in production?** [yes | no] +- **Type of issue?** [Bug report, question, feature request, etc.] + +Some general remarks when posting a bug report: +- Please check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines, please do it in a separate GitHub gist together with your Postgres CRD and configuration manifest. +- If you feel this issue might be more related to the [Spilo](https://github.com/zalando/spilo/issues) Docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening issues in the respective repos. diff --git a/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md b/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md new file mode 100644 index 000000000..78ebc4993 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/postgres-operator-pull-request-template.md @@ -0,0 +1,18 @@ +## Problem description + + + +## Linked issues + + + +## Checklist + +Thanks for submitting a pull request to the Postgres Operator project. +Please ensure your contribution matches the following items: + +- [ ] Your Go code is [formatted](https://blog.golang.org/gofmt). Your IDE should do it automatically for you. +- [ ] You have updated [generated code](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#code-generation) when introducing new fields to the `acid.zalan.do` API package. +- [ ] New [configuration options](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#introduce-additional-configuration-parameters) are reflected in CRD validation, Helm charts and sample manifests. +- [ ] New functionality is covered by [unit](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#unit-tests) and/or [e2e](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#end-to-end-tests) tests. +- [ ] You have checked existing open PRs for possible overlap and referenced them. diff --git a/.github/workflows/run_e2e.yaml b/.github/workflows/run_e2e.yaml new file mode 100644 index 000000000..cff0d49ef --- /dev/null +++ b/.github/workflows/run_e2e.yaml @@ -0,0 +1,25 @@ +name: operator-e2e-tests + +on: + pull_request: + push: + branches: + - master + +jobs: + tests: + name: End-2-End tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.6" + - name: Make dependencies + run: make deps mocks + - name: Compile + run: make linux + - name: Run unit tests + run: go test ./...
+ - name: Run end-2-end tests + run: make e2e diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml new file mode 100644 index 000000000..d66514a5c --- /dev/null +++ b/.github/workflows/run_tests.yaml @@ -0,0 +1,30 @@ +name: operator-tests + +on: + pull_request: + push: + branches: + - master + +jobs: + tests: + name: Unit tests and coverage + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.6" + - name: Make dependencies + run: make deps mocks + - name: Compile + run: make linux + - name: Run unit tests + run: go test -race -covermode atomic -coverprofile=coverage.out ./... + - name: Convert coverage to lcov + uses: jandelgado/gcov2lcov-action@v1.0.8 + - name: Coveralls + uses: coverallsapp/github-action@master + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + path-to-lcov: coverage.lcov diff --git a/.gitignore b/.gitignore index 991fe754f..e062f8479 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,8 @@ _obj _test _manifests +_tmp +github.com # Architecture specific extensions/prefixes *.[568vq] @@ -26,7 +28,9 @@ _testmain.go /vendor/ /build/ /docker/build/ +/github.com/ .idea +.vscode scm-source.json @@ -44,6 +48,8 @@ __pycache__/ # Distribution / packaging .Python +ui/app/node_modules +ui/operator_ui/static/build build/ develop-eggs/ dist/ @@ -92,3 +98,5 @@ e2e/manifests # Translations *.mo *.pot + +mocks diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 589eb03a4..000000000 --- a/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -dist: trusty -sudo: false - -branches: - only: - - master - -language: go - -go: - - "1.12.x" - -before_install: - - go get github.com/mattn/goveralls - -install: - - make deps - -script: - - hack/verify-codegen.sh - - travis_wait 20 goveralls -service=travis-ci -package ./pkg/... -v - - make e2e diff --git a/CODEOWNERS b/CODEOWNERS index 96fe74510..398856c66 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # global owners -* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih +* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih diff --git a/LICENSE b/LICENSE index da62089ec..7c0f459a5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2020 Zalando SE +Copyright (c) 2021 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MAINTAINERS b/MAINTAINERS index 4f4ca87ba..572e6d971 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,3 +1,5 @@ -Oleksii Kliukin Dmitrii Dolgov Sergey Dudoladov +Felix Kunde +Jan Mussler +Rafia Sabih \ No newline at end of file diff --git a/Makefile b/Makefile index dc1c790fe..fe2387670 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: clean local test linux macos docker push scm-source.json e2e +.PHONY: clean local test linux macos mocks docker push scm-source.json e2e BINARY ?= postgres-operator BUILD_FLAGS ?= -v @@ -24,12 +24,16 @@ PKG := `go list ./... 
| grep -v /vendor/` ifeq ($(DEBUG),1) DOCKERFILE = DebugDockerfile - DEBUG_POSTFIX := -debug + DEBUG_POSTFIX := -debug-$(shell date +"%H%M%S") BUILD_FLAGS += -gcflags "-N -l" else DOCKERFILE = Dockerfile endif +ifeq ($(FRESH),1) + DEBUG_FRESH=$(shell date +"%H-%M-%S") +endif + ifdef CDP_PULL_REQUEST_NUMBER CDP_TAG := -${CDP_BUILD_VERSION} endif @@ -66,7 +70,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker-context echo "Version ${VERSION}" echo "CDP tag ${CDP_TAG}" echo "git describe $(shell git describe --tags --always --dirty)" - cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . + cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . indocker-race: docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux" @@ -77,9 +81,12 @@ push: scm-source.json: .git echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json +mocks: + GO111MODULE=on go generate ./... + tools: - GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck - GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.3 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.19.3 + GO111MODULE=on go get github.com/golang/mock/mockgen@v1.4.4 GO111MODULE=on go mod tidy fmt: @@ -97,4 +104,4 @@ test: GO111MODULE=on go test ./... e2e: docker # build operator image to be tested - cd e2e; make tools test clean + cd e2e; make e2etest \ No newline at end of file diff --git a/README.md b/README.md index a2771efa6..7edb60d84 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,34 @@ # Postgres Operator -[![Build Status](https://travis-ci.org/zalando/postgres-operator.svg?branch=master)](https://travis-ci.org/zalando/postgres-operator) -[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg)](https://coveralls.io/github/zalando/postgres-operator) -[![Go Report Card](https://goreportcard.com/badge/github.com/zalando/postgres-operator)](https://goreportcard.com/report/github.com/zalando/postgres-operator) -[![GoDoc](https://godoc.org/github.com/zalando/postgres-operator?status.svg)](https://godoc.org/github.com/zalando/postgres-operator) -[![golangci](https://golangci.com/badges/github.com/zalando/postgres-operator.svg)](https://golangci.com/r/github.com/zalando/postgres-operator) +![Tests](https://github.com/zalando/postgres-operator/workflows/operator-tests/badge.svg) +![E2E Tests](https://github.com/zalando/postgres-operator/workflows/operator-e2e-tests/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg?branch=master)](https://coveralls.io/github/zalando/postgres-operator?branch=master) -The Postgres Operator enables highly-available [PostgreSQL](https://www.postgresql.org/) +The Postgres Operator delivers easy-to-run, highly available [PostgreSQL](https://www.postgresql.org/) clusters on Kubernetes (K8s) powered by [Patroni](https://github.com/zalando/spilo). -It is configured only through manifests to ease integration into automated CI/CD -pipelines with no access to Kubernetes directly. +It is configured only through Postgres manifests (CRDs) to ease integration into automated CI/CD +pipelines with no direct access to the Kubernetes API, promoting infrastructure as code over manual operations.
### Operator features -* Rolling updates on Postgres cluster changes -* Volume resize without Pod restarts -* Cloning Postgres clusters -* Logical Backups to S3 Bucket +* Rolling updates on Postgres cluster changes, incl. quick minor version updates +* Live volume resize without pod restarts (AWS EBS, PVC) +* Database connection pooler with PgBouncer +* Restoring and cloning Postgres clusters (incl. major version upgrade) +* Additionally, logical backups to an S3 bucket can be configured * Standby cluster from S3 WAL archive * Configurable for non-cloud environments +* Basic credential and user management on K8s, easing application deployments +* Support for custom TLS certificates * UI to create and edit Postgres cluster manifests +* Works well on Amazon AWS, Google Cloud, OpenShift and locally on Kind +* Base support for AWS EBS gp3 migration (iops, throughput pending) ### PostgreSQL features -* Supports PostgreSQL 9.6+ +* Supports PostgreSQL 13, starting from 9.5+ * Streaming replication cluster via Patroni * Point-In-Time-Recovery with [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) / @@ -47,13 +50,35 @@ pipelines with no access to Kubernetes directly. [timescaledb](https://github.com/timescale/timescaledb) The Postgres Operator has been developed at Zalando and is being used in -production for over two years. +production for over three years. + +## Notes on Postgres 13 support + +If you are new to the operator, you can skip this and just start using the Postgres operator as is; Postgres 13 is ready to go. + +The Postgres operator supports Postgres 13 with the new Spilo image, which also includes a recent Patroni version to support PG13 settings. +More work on optimizing restarts and rolling upgrades is pending. + +If you are already using an older version of the Postgres operator with a Spilo 12 Docker image, you need to be aware of the changes to the backup path. +We introduce the major version into the backup path to smooth the [major version upgrade](docs/administrator.md#minor-and-major-version-upgrade) that is now supported manually. + +The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for WAL segments in the current path as well as in the old-format paths. +This comes at a potential performance cost and should be disabled after a few days. + +The new Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p2` + +The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5` + ## Getting started For a quick first impression follow the instructions of this [tutorial](docs/quickstart.md). +## Supported setups of Postgres and Applications + +![Features](docs/diagrams/neutral_operator.png) + ## Documentation There is a browser-friendly version of this documentation at @@ -69,12 +94,6 @@ There is a browser-friendly version of this documentation at * [Postgres manifest reference](docs/reference/cluster_manifest.md) * [Command-line options and environment variables](docs/reference/command_line_and_environment.md) -## Google Summer of Code - -The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)! -Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019) -and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).
- ## Community There are two places to get in touch with the community: diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index a6e46ab3e..9be6c84dd 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator-ui -version: 1.4.0 -appVersion: 1.4.0 +version: 1.6.0 +appVersion: 1.6.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: @@ -14,8 +14,6 @@ keywords: maintainers: - name: Zalando email: opensource@zalando.de -- name: siku4 - email: sk@sik-net.de sources: - https://github.com/zalando/postgres-operator engine: gotpl diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml index 0cd03d6e5..948a52274 100644 --- a/charts/postgres-operator-ui/index.yaml +++ b/charts/postgres-operator-ui/index.yaml @@ -2,11 +2,11 @@ apiVersion: v1 entries: postgres-operator-ui: - apiVersion: v1 - appVersion: 1.4.0 - created: "2020-02-24T15:32:47.610967635+01:00" + appVersion: 1.6.0 + created: "2020-12-18T14:19:25.464717041+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d + digest: d7813a235dd1015377c38fd5a14e7679a411c7340a25cfcf5f5294405f9a2eb2 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -18,12 +18,33 @@ entries: maintainers: - email: opensource@zalando.de name: Zalando - - email: sk@sik-net.de - name: siku4 name: postgres-operator-ui sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.4.0.tgz - version: 1.4.0 -generated: "2020-02-24T15:32:47.610348278+01:00" + - postgres-operator-ui-1.6.0.tgz + version: 1.6.0 + - apiVersion: v1 + appVersion: 1.5.0 + created: "2020-12-18T14:19:25.464015993+01:00" + description: Postgres Operator UI provides a graphical interface for a convenient + database-as-a-service user experience + digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - ui + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator-ui + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-ui-1.5.0.tgz + version: 1.5.0 +generated: "2020-12-18T14:19:25.463104102+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz deleted file mode 100644 index 8d1276dd1..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.4.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz new file mode 100644 index 000000000..d8527f293 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.5.0.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz new file mode 100644 index 000000000..68a43b51b Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.6.0.tgz differ diff --git a/charts/postgres-operator-ui/templates/clusterrole.yaml 
b/charts/postgres-operator-ui/templates/clusterrole.yaml index 4f76400ec..57a1a6365 100644 --- a/charts/postgres-operator-ui/templates/clusterrole.yaml +++ b/charts/postgres-operator-ui/templates/clusterrole.yaml @@ -38,6 +38,7 @@ rules: - apiGroups: - apps resources: + - deployments - statefulsets verbs: - get diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml index da0280e61..29bf2e670 100644 --- a/charts/postgres-operator-ui/templates/deployment.yaml +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -1,5 +1,5 @@ -apiVersion: "apps/v1" -kind: "Deployment" +apiVersion: apps/v1 +kind: Deployment metadata: labels: app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} @@ -21,6 +21,10 @@ spec: team: "acid" # Parameterize? spec: serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} containers: - name: "service" image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" @@ -41,8 +45,12 @@ spec: value: "http://localhost:8081" - name: "OPERATOR_API_URL" value: {{ .Values.envs.operatorApiUrl }} + - name: "OPERATOR_CLUSTER_NAME_LABEL" + value: {{ .Values.envs.operatorClusterNameLabel }} + - name: "RESOURCES_VISIBLE" + value: "{{ .Values.envs.resourcesVisible }}" - name: "TARGET_NAMESPACE" - value: {{ .Values.envs.targetNamespace }} + value: "{{ .Values.envs.targetNamespace }}" - name: "TEAMS" value: |- [ @@ -60,10 +68,8 @@ spec: "resources_visible": true, "users_visible": true, "postgresql_versions": [ + "13", "12", - "11", - "10", - "9.6", - "9.5" + "11" ] } diff --git a/charts/postgres-operator-ui/templates/service.yaml b/charts/postgres-operator-ui/templates/service.yaml index 09adff26f..bc40fbbb1 100644 --- a/charts/postgres-operator-ui/templates/service.yaml +++ b/charts/postgres-operator-ui/templates/service.yaml @@ -11,6 +11,9 @@ spec: ports: - port: {{ .Values.service.port }} targetPort: 8081 + {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} protocol: TCP selector: app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index 148a687c3..dea5007c9 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -8,9 +8,15 @@ replicaCount: 1 image: registry: registry.opensource.zalan.do repository: acid/postgres-operator-ui - tag: v1.4.0 + tag: v1.6.0 pullPolicy: "IfNotPresent" +# Optionally specify an array of imagePullSecrets. +# Secrets must be manually created in the namespace. +# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +# imagePullSecrets: +# - name: + rbac: # Specifies whether RBAC resources should be created create: true @@ -25,8 +31,8 @@ serviceAccount: # configure UI pod resources resources: limits: - cpu: 300m - memory: 3000Mi + cpu: 200m + memory: 200Mi requests: cpu: 100m memory: 100Mi @@ -36,12 +42,17 @@ envs: # IMPORTANT: While operator chart and UI chart are independent, this is the interface between # UI and operator API. Insert the service name of the operator API here!
operatorApiUrl: "http://postgres-operator:8080" + operatorClusterNameLabel: "cluster-name" + resourcesVisible: "False" targetNamespace: "default" # configure UI service service: type: "ClusterIP" - port: "8080" + port: "80" + # If the type of the service is NodePort a port can be specified using the nodePort field + # If the nodePort field is not specified, or if it has no value, then a random port is used + # notePort: 32521 # configure UI ingress. If needed: "enabled: true" ingress: diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index 89468dfa4..e5a66b6e3 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: postgres-operator -version: 1.4.0 -appVersion: 1.4.0 +version: 1.6.0 +appVersion: 1.6.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index e5dc266c3..b40ac0774 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: operatorconfigurations.acid.zalan.do @@ -15,312 +15,532 @@ spec: singular: operatorconfiguration shortNames: - opconfig - additionalPrinterColumns: - - name: Image - type: string - description: Spilo image to be used for Pods - JSONPath: .configuration.docker_image - - name: Cluster-Label - type: string - description: Label for K8s resources created by operator - JSONPath: .configuration.kubernetes.cluster_name_label - - name: Service-Account - type: string - description: Name of service account to be used - JSONPath: .configuration.kubernetes.pod_service_account_name - - name: Min-Instances - type: integer - description: Minimum number of instances per Postgres cluster - JSONPath: .configuration.min_instances - - name: Age - type: date - JSONPath: .metadata.creationTimestamp + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - configuration - properties: - kind: - type: string - enum: - - OperatorConfiguration - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - configuration: - type: object - properties: - docker_image: - type: string - enable_crd_validation: - type: boolean - enable_shm_volume: - type: boolean - etcd_host: - type: string - max_instances: - type: integer - minimum: -1 # -1 = disabled - min_instances: - type: integer - minimum: -1 # -1 = disabled - resync_period: - type: string - repair_period: - type: string - set_memory_request_to_limit: - type: boolean - sidecar_docker_images: - type: object - additionalProperties: - type: string - workers: - type: integer - minimum: 1 - users: - type: object - properties: - replication_username: - type: string - super_username: - type: string - kubernetes: - type: object - properties: - cluster_domain: - type: string - cluster_labels: - type: object - additionalProperties: - type: string - cluster_name_label: - type: string - custom_pod_annotations: - type: object - additionalProperties: - type: string - enable_init_containers: - type: boolean - enable_pod_antiaffinity: - type: boolean - enable_pod_disruption_budget: - type: 
boolean - enable_sidecars: - type: boolean - infrastructure_roles_secret_name: - type: string - inherited_labels: - type: array - items: - type: string - master_pod_move_timeout: - type: string - node_readiness_label: - type: object - additionalProperties: - type: string - oauth_token_secret_name: - type: string - pdb_name_format: - type: string - pod_antiaffinity_topology_key: - type: string - pod_environment_configmap: - type: string - pod_management_policy: - type: string - enum: - - "ordered_ready" - - "parallel" - pod_priority_class_name: - type: string - pod_role_label: - type: string - pod_service_account_definition: - type: string - pod_service_account_name: - type: string - pod_service_account_role_definition: - type: string - pod_service_account_role_binding_definition: - type: string - pod_terminate_grace_period: - type: string - secret_name_template: - type: string - spilo_fsgroup: - type: integer - spilo_privileged: - type: boolean - toleration: - type: object - additionalProperties: - type: string - watched_namespace: - type: string - postgres_pod_resources: - type: object - properties: - default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - min_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - min_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - timeouts: - type: object - properties: - pod_label_wait_timeout: - type: string - pod_deletion_wait_timeout: - type: string - ready_wait_interval: - type: string - ready_wait_timeout: - type: string - resource_check_interval: - type: string - resource_check_timeout: - type: string - load_balancer: - type: object - properties: - custom_service_annotations: - type: object - additionalProperties: - type: string - db_hosted_zone: - type: string - enable_master_load_balancer: - type: boolean - enable_replica_load_balancer: - type: boolean - master_dns_name_format: - type: string - replica_dns_name_format: - type: string - aws_or_gcp: - type: object - properties: - additional_secret_mount: - type: string - additional_secret_mount_path: - type: string - aws_region: - type: string - kube_iam_role: - type: string - log_s3_bucket: - type: string - wal_s3_bucket: - type: string - logical_backup: - type: object - properties: - logical_backup_docker_image: - type: string - logical_backup_s3_access_key_id: - type: string - logical_backup_s3_bucket: - type: string - logical_backup_s3_endpoint: - type: string - logical_backup_s3_region: - type: string - logical_backup_s3_secret_access_key: - type: string - logical_backup_s3_sse: - type: string - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - debug: - type: object - properties: - debug_logging: - type: boolean - enable_database_access: - type: boolean - teams_api: - type: object - properties: - enable_admin_role_for_users: - type: boolean - enable_team_superuser: - type: boolean - enable_teams_api: - type: boolean - pam_configuration: - type: string - pam_role_name: - type: string - postgres_superuser_teams: - type: array - items: - type: string - protected_role_names: - type: array - items: - type: string - team_admin_role: - type: string - team_api_role_configuration: - type: object - additionalProperties: - 
type: string - teams_api_url: - type: string - logging_rest_api: - type: object - properties: - api_port: - type: integer - cluster_history_entries: - type: integer - ring_log_lines: - type: integer - scalyr: - type: object - properties: - scalyr_api_key: - type: string - scalyr_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_image: - type: string - scalyr_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_server_url: - type: string - status: - type: object - additionalProperties: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: Spilo image to be used for Pods + jsonPath: .configuration.docker_image + - name: Cluster-Label + type: string + description: Label for K8s resources created by operator + jsonPath: .configuration.kubernetes.cluster_name_label + - name: Service-Account + type: string + description: Name of service account to be used + jsonPath: .configuration.kubernetes.pod_service_account_name + - name: Min-Instances + type: integer + description: Minimum number of instances per Postgres cluster + jsonPath: .configuration.min_instances + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - configuration + properties: + kind: type: string + enum: + - OperatorConfiguration + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + configuration: + type: object + properties: + docker_image: + type: string + default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p2" + enable_crd_validation: + type: boolean + default: true + enable_lazy_spilo_upgrade: + type: boolean + default: false + enable_pgversion_env_var: + type: boolean + default: true + enable_shm_volume: + type: boolean + default: true + enable_spilo_wal_path_compat: + type: boolean + default: false + etcd_host: + type: string + default: "" + kubernetes_use_configmaps: + type: boolean + default: false + max_instances: + type: integer + minimum: -1 # -1 = disabled + default: -1 + min_instances: + type: integer + minimum: -1 # -1 = disabled + default: -1 + resync_period: + type: string + default: "30m" + repair_period: + type: string + default: "5m" + set_memory_request_to_limit: + type: boolean + default: false + sidecar_docker_images: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + workers: + type: integer + minimum: 1 + default: 8 + users: + type: object + properties: + replication_username: + type: string + default: standby + super_username: + type: string + default: postgres + kubernetes: + type: object + properties: + cluster_domain: + type: string + default: "cluster.local" + cluster_labels: + type: object + additionalProperties: + type: string + default: + application: spilo + cluster_name_label: + type: string + default: "cluster-name" + custom_pod_annotations: + type: object + additionalProperties: + type: string + delete_annotation_date_key: + type: string + delete_annotation_name_key: + type: string + downscaler_annotations: + type: array + items: + type: string + enable_init_containers: + type: boolean + default: true + enable_pod_antiaffinity: + type: 
boolean + default: false + enable_pod_disruption_budget: + type: boolean + default: true + enable_sidecars: + type: boolean + default: true + infrastructure_roles_secret_name: + type: string + infrastructure_roles_secrets: + type: array + nullable: true + items: + type: object + required: + - secretname + - userkey + - passwordkey + properties: + secretname: + type: string + userkey: + type: string + passwordkey: + type: string + rolekey: + type: string + defaultuservalue: + type: string + defaultrolevalue: + type: string + details: + type: string + template: + type: boolean + inherited_annotations: + type: array + items: + type: string + inherited_labels: + type: array + items: + type: string + master_pod_move_timeout: + type: string + default: "20m" + node_readiness_label: + type: object + additionalProperties: + type: string + oauth_token_secret_name: + type: string + default: "postgresql-operator" + pdb_name_format: + type: string + default: "postgres-{cluster}-pdb" + pod_antiaffinity_topology_key: + type: string + default: "kubernetes.io/hostname" + pod_environment_configmap: + type: string + pod_environment_secret: + type: string + pod_management_policy: + type: string + enum: + - "ordered_ready" + - "parallel" + default: "ordered_ready" + pod_priority_class_name: + type: string + pod_role_label: + type: string + default: "spilo-role" + pod_service_account_definition: + type: string + default: "" + pod_service_account_name: + type: string + default: "postgres-pod" + pod_service_account_role_definition: + type: string + default: "" + pod_service_account_role_binding_definition: + type: string + default: "" + pod_terminate_grace_period: + type: string + default: "5m" + secret_name_template: + type: string + default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + spilo_runasuser: + type: integer + spilo_runasgroup: + type: integer + spilo_fsgroup: + type: integer + spilo_privileged: + type: boolean + default: false + storage_resize_mode: + type: string + enum: + - "ebs" + - "pvc" + - "off" + default: "pvc" + toleration: + type: object + additionalProperties: + type: string + watched_namespace: + type: string + postgres_pod_resources: + type: object + properties: + default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "100m" + default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "500Mi" + default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "250m" + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "250Mi" + timeouts: + type: object + properties: + pod_label_wait_timeout: + type: string + default: "10m" + pod_deletion_wait_timeout: + type: string + default: "10m" + ready_wait_interval: + type: string + default: "4s" + ready_wait_timeout: + type: string + default: "30s" + resource_check_interval: + type: string + default: "3s" + resource_check_timeout: + type: string + default: "10m" + load_balancer: + type: object + properties: + custom_service_annotations: + type: object + additionalProperties: + type: string + db_hosted_zone: + type: string + default: "db.example.com" + enable_master_load_balancer: + type: boolean + default: true + enable_replica_load_balancer: + type: boolean + default: false + 
external_traffic_policy: + type: string + enum: + - "Cluster" + - "Local" + default: "Cluster" + master_dns_name_format: + type: string + default: "{cluster}.{team}.{hostedzone}" + replica_dns_name_format: + type: string + default: "{cluster}-repl.{team}.{hostedzone}" + aws_or_gcp: + type: object + properties: + additional_secret_mount: + type: string + additional_secret_mount_path: + type: string + default: "/meta/credentials" + aws_region: + type: string + default: "eu-central-1" + enable_ebs_gp3_migration: + type: boolean + default: false + enable_ebs_gp3_migration_max_size: + type: integer + default: 1000 + gcp_credentials: + type: string + kube_iam_role: + type: string + log_s3_bucket: + type: string + wal_gs_bucket: + type: string + wal_s3_bucket: + type: string + logical_backup: + type: object + properties: + logical_backup_docker_image: + type: string + default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + logical_backup_google_application_credentials: + type: string + logical_backup_job_prefix: + type: string + default: "logical-backup-" + logical_backup_provider: + type: string + default: "s3" + logical_backup_s3_access_key_id: + type: string + logical_backup_s3_bucket: + type: string + logical_backup_s3_endpoint: + type: string + logical_backup_s3_region: + type: string + logical_backup_s3_secret_access_key: + type: string + logical_backup_s3_sse: + type: string + logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + default: "30 00 * * *" + debug: + type: object + properties: + debug_logging: + type: boolean + default: true + enable_database_access: + type: boolean + default: true + teams_api: + type: object + properties: + enable_admin_role_for_users: + type: boolean + default: true + enable_postgres_team_crd: + type: boolean + default: true + enable_postgres_team_crd_superusers: + type: boolean + default: false + enable_team_superuser: + type: boolean + default: false + enable_teams_api: + type: boolean + default: true + pam_configuration: + type: string + default: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees" + pam_role_name: + type: string + default: "zalandos" + postgres_superuser_teams: + type: array + items: + type: string + protected_role_names: + type: array + items: + type: string + default: + - admin + team_admin_role: + type: string + default: "admin" + team_api_role_configuration: + type: object + additionalProperties: + type: string + default: + log_statement: all + teams_api_url: + type: string + default: "https://teams.example.com/api/" + logging_rest_api: + type: object + properties: + api_port: + type: integer + default: 8080 + cluster_history_entries: + type: integer + default: 1000 + ring_log_lines: + type: integer + default: 100 + scalyr: # deprecated + type: object + properties: + scalyr_api_key: + type: string + scalyr_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + scalyr_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "100m" + scalyr_image: + type: string + scalyr_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "500Mi" + scalyr_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "50Mi" + scalyr_server_url: + type: string + default: "https://upload.eu.scalyr.com" + connection_pooler: + type: object + properties: + connection_pooler_schema: + type: string + default: "pooler" + connection_pooler_user: + type: 
string + default: "pooler" + connection_pooler_image: + type: string + default: "registry.opensource.zalan.do/acid/pgbouncer:master-12" + connection_pooler_max_db_connections: + type: integer + default: 60 + connection_pooler_mode: + type: string + enum: + - "session" + - "transaction" + default: "transaction" + connection_pooler_number_of_instances: + type: integer + minimum: 1 + default: 2 + connection_pooler_default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + connection_pooler_default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "500m" + connection_pooler_default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + connection_pooler_default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + status: + type: object + additionalProperties: + type: string diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index af535e2c8..ad11f6407 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: postgresqls.acid.zalan.do @@ -15,353 +15,561 @@ spec: singular: postgresql shortNames: - pg - additionalPrinterColumns: - - name: Team - type: string - description: Team responsible for Postgres CLuster - JSONPath: .spec.teamId - - name: Version - type: string - description: PostgreSQL version - JSONPath: .spec.postgresql.version - - name: Pods - type: integer - description: Number of Pods per Postgres cluster - JSONPath: .spec.numberOfInstances - - name: Volume - type: string - description: Size of the bound volume - JSONPath: .spec.volume.size - - name: CPU-Request - type: string - description: Requested CPU for Postgres containers - JSONPath: .spec.resources.requests.cpu - - name: Memory-Request - type: string - description: Requested memory for Postgres containers - JSONPath: .spec.resources.requests.memory - - name: Age - type: date - JSONPath: .metadata.creationTimestamp - - name: Status - type: string - description: Current sync status of postgresql resource - JSONPath: .status.PostgresClusterStatus + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - spec - properties: - kind: - type: string - enum: - - postgresql - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - spec: - type: object - required: - - numberOfInstances - - teamId - - postgresql - properties: - allowedSourceRanges: - type: array - nullable: true - items: - type: string - pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' - clone: - type: object - required: - - cluster - properties: - cluster: - type: string - s3_endpoint: - type: string - s3_access_key_id: - type: string - s3_secret_access_key: - type: string - s3_force_path_style: - type: boolean - s3_wal_path: - type: string - timestamp: - type: string - pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' - # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a 
timezone as an offset relative to UTC - # Example: 1996-12-19T16:39:57-08:00 - # Note: this field requires a timezone - uid: - format: uuid - type: string - databases: - type: object - additionalProperties: - type: string - # Note: usernames specified here as database owners must be declared in the users key of the spec key. - dockerImage: - type: string - enableLogicalBackup: - type: boolean - enableMasterLoadBalancer: - type: boolean - enableReplicaLoadBalancer: - type: boolean - enableShmVolume: - type: boolean - init_containers: # deprecated - type: array - nullable: true - items: - type: object - additionalProperties: true - initContainers: - type: array - nullable: true - items: - type: object - additionalProperties: true - logicalBackupSchedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - maintenanceWindows: - type: array - items: - type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' - numberOfInstances: - type: integer - minimum: 0 - patroni: - type: object - properties: - initdb: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Team + type: string + description: Team responsible for Postgres CLuster + jsonPath: .spec.teamId + - name: Version + type: string + description: PostgreSQL version + jsonPath: .spec.postgresql.version + - name: Pods + type: integer + description: Number of Pods per Postgres cluster + jsonPath: .spec.numberOfInstances + - name: Volume + type: string + description: Size of the bound volume + jsonPath: .spec.volume.size + - name: CPU-Request + type: string + description: Requested CPU for Postgres containers + jsonPath: .spec.resources.requests.cpu + - name: Memory-Request + type: string + description: Requested memory for Postgres containers + jsonPath: .spec.resources.requests.memory + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + - name: Status + type: string + description: Current sync status of postgresql resource + jsonPath: .status.PostgresClusterStatus + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - postgresql + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + required: + - numberOfInstances + - teamId + - postgresql + - volume + properties: + additionalVolumes: + type: array + items: type: object - additionalProperties: + required: + - name + - mountPath + - volumeSource + properties: + name: + type: string + mountPath: + type: string + targetContainers: + type: array + nullable: true + items: + type: string + volumeSource: + type: object + x-kubernetes-preserve-unknown-fields: true + subPath: + type: string + allowedSourceRanges: + type: array + nullable: true + items: + type: string + pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' + clone: + type: object + required: + - cluster + properties: + cluster: type: string - pg_hba: - type: array - items: + s3_endpoint: type: string - slots: + s3_access_key_id: + type: string + s3_secret_access_key: + type: string + s3_force_path_style: + type: boolean + s3_wal_path: + type: string + timestamp: + type: string + pattern: 
'^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' + # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC + # Example: 1996-12-19T16:39:57-08:00 + # Note: this field requires a timezone + uid: + format: uuid + type: string + connectionPooler: + type: object + properties: + dockerImage: + type: string + maxDBConnections: + type: integer + mode: + type: string + enum: + - "session" + - "transaction" + numberOfInstances: + type: integer + minimum: 2 + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schema: + type: string + user: + type: string + databases: + type: object + additionalProperties: + type: string + # Note: usernames specified here as database owners must be declared in the users key of the spec key. + dockerImage: + type: string + enableConnectionPooler: + type: boolean + enableReplicaConnectionPooler: + type: boolean + enableLogicalBackup: + type: boolean + enableMasterLoadBalancer: + type: boolean + enableReplicaLoadBalancer: + type: boolean + enableShmVolume: + type: boolean + init_containers: # deprecated + type: array + nullable: true + items: type: object - additionalProperties: + x-kubernetes-preserve-unknown-fields: true + initContainers: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + logicalBackupSchedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + maintenanceWindows: + type: array + items: + type: string + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + numberOfInstances: + type: integer + minimum: 0 + patroni: + type: object + properties: + initdb: type: object additionalProperties: type: string - ttl: - type: integer - loop_wait: - type: integer - retry_timeout: - type: integer - maximum_lag_on_failover: - type: integer - podAnnotations: - type: object - additionalProperties: - type: string - pod_priority_class_name: # deprecated - type: string - podPriorityClassName: - type: string - postgresql: - type: object - required: - - version - properties: - version: - type: string - enum: - - "9.3" - - "9.4" - - "9.5" - - "9.6" - - "10" - - "11" - - "12" - parameters: - type: object - additionalProperties: - type: string - replicaLoadBalancer: # deprecated - type: boolean - resources: - type: object - required: - - requests - - limits - properties: - limits: - type: object - required: - - cpu - - memory - properties: - cpu: + loop_wait: + type: integer + maximum_lag_on_failover: + type: integer + pg_hba: + type: array + items: type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. 
The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - requests: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. 
- serviceAnnotations: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: + retry_timeout: + type: integer + slots: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + synchronous_mode: + type: boolean + synchronous_mode_strict: + type: boolean + ttl: + type: integer + podAnnotations: type: object - additionalProperties: true - spiloFSGroup: - type: integer - standby: - type: object - required: - - s3_wal_path - properties: - s3_wal_path: + additionalProperties: type: string - teamId: - type: string - tolerations: - type: array - items: + pod_priority_class_name: # deprecated + type: string + podPriorityClassName: + type: string + postgresql: type: object required: - - key - - operator - - effect + - version properties: - key: - type: string - operator: + version: type: string enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - useLoadBalancer: # deprecated - type: boolean - users: - type: object - additionalProperties: + - "9.3" + - "9.4" + - "9.5" + - "9.6" + - "10" + - "11" + - "12" + - "13" + parameters: + type: object + additionalProperties: + type: string + preparedDatabases: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + extensions: + type: object + additionalProperties: + type: string + schemas: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + defaultRoles: + type: boolean + replicaLoadBalancer: # deprecated + type: boolean + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + # Decimal natural followed by m, or decimal natural followed by + # dot followed by up to three decimal digits. + # + # This is because the Kubernetes CPU resource has millis as the + # maximum precision. The actual values are checked in code + # because the regular expression would be huge and horrible and + # not very helpful in validation error messages; this one checks + # only the format of the given number. + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + # Note: the value specified here must not be zero or be lower + # than the corresponding request. + memory: + type: string + # You can express memory as a plain integer or as a fixed-point + # integer using one of these suffixes: E, P, T, G, M, k. You can + # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero or be higher + # than the corresponding limit. 
+ requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schedulerName: + type: string + serviceAnnotations: + type: object + additionalProperties: + type: string + sidecars: type: array nullable: true - description: "Role flags specified here must not contradict each other" items: - type: string - enum: - - bypassrls - - BYPASSRLS - - nobypassrls - - NOBYPASSRLS - - createdb - - CREATEDB - - nocreatedb - - NOCREATEDB - - createrole - - CREATEROLE - - nocreaterole - - NOCREATEROLE - - inherit - - INHERIT - - noinherit - - NOINHERIT - - login - - LOGIN - - nologin - - NOLOGIN - - replication - - REPLICATION - - noreplication - - NOREPLICATION - - superuser - - SUPERUSER - - nosuperuser - - NOSUPERUSER - volume: - type: object - required: - - size - properties: - size: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero. - storageClass: - type: string - subPath: - type: string + type: object + x-kubernetes-preserve-unknown-fields: true + spiloRunAsUser: + type: integer + spiloRunAsGroup: + type: integer + spiloFSGroup: + type: integer + standby: + type: object + required: + - s3_wal_path + properties: + s3_wal_path: + type: string + teamId: + type: string + tls: + type: object + required: + - secretName + properties: + secretName: + type: string + certificateFile: + type: string + privateKeyFile: + type: string + caFile: + type: string + caSecretName: + type: string + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + required: + - weight + - preference + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + format: int32 + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + tolerations: + type: array + items: + type: object + required: + - key + - operator + - effect + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + useLoadBalancer: # deprecated + type: boolean + users: + type: object + additionalProperties: + type: array + nullable: true + description: "Role flags specified here must not contradict each other" + items: + type: string + enum: + - bypassrls + - BYPASSRLS + - nobypassrls + - NOBYPASSRLS + - createdb + - CREATEDB + - nocreatedb + - NOCREATEDB 
+ - createrole + - CREATEROLE + - nocreaterole + - NOCREATEROLE + - inherit + - INHERIT + - noinherit + - NOINHERIT + - login + - LOGIN + - nologin + - NOLOGIN + - replication + - REPLICATION + - noreplication + - NOREPLICATION + - superuser + - SUPERUSER + - nosuperuser + - NOSUPERUSER + volume: + type: object + required: + - size + properties: + iops: + type: integer + size: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero. + storageClass: + type: string + subPath: + type: string + throughput: + type: integer + status: + type: object + additionalProperties: + type: string diff --git a/charts/postgres-operator/crds/postgresteams.yaml b/charts/postgres-operator/crds/postgresteams.yaml new file mode 100644 index 000000000..fbf873b84 --- /dev/null +++ b/charts/postgres-operator/crds/postgresteams.yaml @@ -0,0 +1,72 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: postgresteams.acid.zalan.do + labels: + app.kubernetes.io/name: postgres-operator + annotations: + "helm.sh/hook": crd-install +spec: + group: acid.zalan.do + names: + kind: PostgresTeam + listKind: PostgresTeamList + plural: postgresteams + singular: postgresteam + shortNames: + - pgteam + categories: + - all + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - PostgresTeam + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + properties: + additionalSuperuserTeams: + type: object + description: "Map for teamId and associated additional superuser teams" + additionalProperties: + type: array + nullable: true + description: "List of teams to become Postgres superusers" + items: + type: string + additionalTeams: + type: object + description: "Map for teamId and associated additional teams" + additionalProperties: + type: array + nullable: true + description: "List of teams whose members will also be added to the Postgres cluster" + items: + type: string + additionalMembers: + type: object + description: "Map for teamId and associated additional users" + additionalProperties: + type: array + nullable: true + description: "List of users who will also be added to the Postgres cluster" + items: + type: string diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index 53181d74a..6b64fd705 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -2,11 +2,11 @@ apiVersion: v1 entries: postgres-operator: - apiVersion: v1 - appVersion: 1.4.0 - created: "2020-02-20T17:39:25.443276193+01:00" + appVersion: 1.6.0 + created: "2020-12-17T16:16:25.639708821+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: b93ccde5581deb8ed0857136b8ce74ca3f1b7240438fa4415f705764a1300bed + digest: 2f5f527bae0a22b02f2f7b1e2352665cecf489a990e18212444fa34450b97604 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -21,14 +21,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.4.0.tgz - version: 1.4.0 + - postgres-operator-1.6.0.tgz + version: 1.6.0 - apiVersion: v1 - appVersion: 1.3.0 - created: "2020-02-20T17:39:25.441532163+01:00" + appVersion: 1.5.0 + created: "2020-12-17T16:16:25.637262877+01:00" description: Postgres Operator creates 
and manages PostgreSQL clusters running in Kubernetes - digest: 7e788fd37daec76a01f6d6f9fe5be5b54f5035e4eba0041e80a760d656537325 + digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -43,30 +43,6 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.3.0.tgz - version: 1.3.0 - - apiVersion: v1 - appVersion: 1.2.0 - created: "2020-02-20T17:39:25.440278302+01:00" - description: Postgres Operator creates and manages PostgreSQL clusters running - in Kubernetes - digest: d10710c7cf19f4e266e7704f5d1e98dcfc61bee3919522326c35c22ca7d2f2bf - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - - email: kgyoo8232@gmail.com - name: kimxogus - name: postgres-operator - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-1.2.0.tgz - version: 1.2.0 -generated: "2020-02-20T17:39:25.439168098+01:00" + - postgres-operator-1.5.0.tgz + version: 1.5.0 +generated: "2020-12-17T16:16:25.635647131+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.2.0.tgz b/charts/postgres-operator/postgres-operator-1.2.0.tgz deleted file mode 100644 index bd725688c..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.2.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.3.0.tgz b/charts/postgres-operator/postgres-operator-1.3.0.tgz deleted file mode 100644 index 460fed532..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.3.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.4.0.tgz b/charts/postgres-operator/postgres-operator-1.4.0.tgz deleted file mode 100644 index a988ed236..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.4.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.5.0.tgz b/charts/postgres-operator/postgres-operator-1.5.0.tgz new file mode 100644 index 000000000..6e1a48ab7 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.5.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.6.0.tgz b/charts/postgres-operator/postgres-operator-1.6.0.tgz new file mode 100644 index 000000000..bf98cd818 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.6.0.tgz differ diff --git a/charts/postgres-operator/templates/_helpers.tpl b/charts/postgres-operator/templates/_helpers.tpl index 306613ac3..e49670763 100644 --- a/charts/postgres-operator/templates/_helpers.tpl +++ b/charts/postgres-operator/templates/_helpers.tpl @@ -31,6 +31,20 @@ Create a service account name. {{ default (include "postgres-operator.fullname" .) .Values.serviceAccount.name }} {{- end -}} +{{/* +Create a pod service account name. +*/}} +{{- define "postgres-pod.serviceAccountName" -}} +{{ default (printf "%s-%v" (include "postgres-operator.fullname" .) "pod") .Values.podServiceAccount.name }} +{{- end -}} + +{{/* +Create a controller ID. +*/}} +{{- define "postgres-operator.controllerID" -}} +{{ default (include "postgres-operator.fullname" .) .Values.controllerID.name }} +{{- end -}} + {{/* Create chart name and version as used by the chart label. 
*/}} diff --git a/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml index c327d9101..33c43822f 100644 --- a/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml +++ b/charts/postgres-operator/templates/clusterrole-postgres-pod.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: postgres-pod + name: {{ include "postgres-pod.serviceAccountName" . }} labels: app.kubernetes.io/name: {{ template "postgres-operator.name" . }} helm.sh/chart: {{ template "postgres-operator.chart" . }} @@ -10,6 +10,27 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} rules: # Patroni needs to watch and manage endpoints +{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- else }} - apiGroups: - "" resources: @@ -23,6 +44,7 @@ rules: - patch - update - watch +{{- end }} # Patroni needs to watch pods - apiGroups: - "" @@ -41,6 +63,7 @@ rules: - services verbs: - create +{{- if toString .Values.configKubernetes.spilo_privileged | eq "true" }} # to run privileged pods - apiGroups: - extensions @@ -50,4 +73,5 @@ rules: - privileged verbs: - use +{{- end }} {{ end }} diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index 45a675721..82cb22c95 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -25,6 +25,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io @@ -35,6 +44,40 @@ rules: - get - patch - update +# to send events to the CRs +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - update + - watch +# to manage endpoints/configmaps which are also used by Patroni +{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }} +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- else }} # to read configuration from ConfigMaps - apiGroups: - "" @@ -42,7 +85,6 @@ rules: - configmaps verbs: - get -# to manage endpoints which are also used by Patroni - apiGroups: - "" resources: @@ -56,6 +98,7 @@ rules: - patch - update - watch +{{- end }} # to CRUD secrets for database access - apiGroups: - "" @@ -84,6 +127,10 @@ rules: - delete - get - list +{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }} + - patch + - update +{{- end }} # to read existing PVs. Creation should be done via dynamic provisioning - apiGroups: - "" @@ -92,7 +139,9 @@ rules: verbs: - get - list +{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }} - update # only for resizing AWS volumes +{{- end }} # to watch Spilo pods and do rolling updates. 
Creation via StatefulSet - apiGroups: - "" @@ -128,6 +177,7 @@ rules: - apps resources: - statefulsets + - deployments verbs: - create - delete @@ -179,7 +229,8 @@ rules: verbs: - get - create -# to grant privilege to run privileged pods +{{- if toString .Values.configKubernetes.spilo_privileged | eq "true" }} +# to run privileged pods - apiGroups: - extensions resources: @@ -188,4 +239,5 @@ rules: - privileged verbs: - use +{{- end }} {{ end }} diff --git a/charts/postgres-operator/templates/configmap.yaml b/charts/postgres-operator/templates/configmap.yaml index 0b976294e..87fd752b1 100644 --- a/charts/postgres-operator/templates/configmap.yaml +++ b/charts/postgres-operator/templates/configmap.yaml @@ -9,6 +9,10 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} data: + {{- if .Values.podPriorityClassName }} + pod_priority_class_name: {{ .Values.podPriorityClassName }} + {{- end }} + pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} {{ toYaml .Values.configGeneral | indent 2 }} {{ toYaml .Values.configUsers | indent 2 }} {{ toYaml .Values.configKubernetes | indent 2 }} @@ -19,4 +23,5 @@ data: {{ toYaml .Values.configDebug | indent 2 }} {{ toYaml .Values.configLoggingRestApi | indent 2 }} {{ toYaml .Values.configTeamsApi | indent 2 }} +{{ toYaml .Values.configConnectionPooler | indent 2 }} {{- end }} diff --git a/charts/postgres-operator/templates/deployment.yaml b/charts/postgres-operator/templates/deployment.yaml index 1f7e39bbc..89500ae94 100644 --- a/charts/postgres-operator/templates/deployment.yaml +++ b/charts/postgres-operator/templates/deployment.yaml @@ -37,15 +37,25 @@ spec: image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: + {{- if .Values.enableJsonLogging }} + - name: ENABLE_JSON_LOGGING + value: "true" + {{- end }} {{- if eq .Values.configTarget "ConfigMap" }} - name: CONFIG_MAP_NAME value: {{ template "postgres-operator.fullname" . }} {{- else }} - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT value: {{ template "postgres-operator.fullname" . }} + {{- end }} + {{- if .Values.controllerID.create }} + - name: CONTROLLER_ID + value: {{ template "postgres-operator.controllerID" . }} {{- end }} resources: {{ toYaml .Values.resources | indent 10 }} + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} {{- if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 8 }} diff --git a/charts/postgres-operator/templates/operatorconfiguration.yaml b/charts/postgres-operator/templates/operatorconfiguration.yaml index 06e9c7605..0625e1327 100644 --- a/charts/postgres-operator/templates/operatorconfiguration.yaml +++ b/charts/postgres-operator/templates/operatorconfiguration.yaml @@ -13,6 +13,10 @@ configuration: users: {{ toYaml .Values.configUsers | indent 4 }} kubernetes: + {{- if .Values.podPriorityClassName }} + pod_priority_class_name: {{ .Values.podPriorityClassName }} + {{- end }} + pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} oauth_token_secret_name: {{ template "postgres-operator.fullname" . 
}} {{ toYaml .Values.configKubernetes | indent 4 }} postgres_pod_resources: @@ -31,6 +35,6 @@ configuration: {{ toYaml .Values.configTeamsApi | indent 4 }} logging_rest_api: {{ toYaml .Values.configLoggingRestApi | indent 4 }} - scalyr: -{{ toYaml .Values.configScalyr | indent 4 }} + connection_pooler: +{{ toYaml .Values.configConnectionPooler | indent 4 }} {{- end }} diff --git a/charts/postgres-operator/templates/postgres-pod-priority-class.yaml b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml new file mode 100644 index 000000000..7ee0f2e55 --- /dev/null +++ b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml @@ -0,0 +1,15 @@ +{{- if .Values.podPriorityClassName }} +apiVersion: scheduling.k8s.io/v1 +description: 'Use only for databases controlled by Postgres operator' +kind: PriorityClass +metadata: + labels: + app.kubernetes.io/name: {{ template "postgres-operator.name" . }} + helm.sh/chart: {{ template "postgres-operator.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ .Values.podPriorityClassName }} +preemptionPolicy: PreemptLowerPriority +globalDefault: false +value: 1000000 +{{- end }} diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 834841058..1904dbf7f 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.4.0 + tag: v1.6.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -19,12 +19,20 @@ configTarget: "OperatorConfigurationCRD" configGeneral: # choose if deployment creates/updates CRDs with OpenAPIV3Validation enable_crd_validation: true + # update only the statefulsets without immediately doing the rolling update + enable_lazy_spilo_upgrade: false + # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION + enable_pgversion_env_var: true # start any new database pod without limitations on shm memory enable_shm_volume: true + # enables backwards compatible path between Spilo 12 and Spilo 13 images + enable_spilo_wal_path_compat: false # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" + # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s) + # kubernetes_use_configmaps: false # Spilo docker image - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # max number of instances in Postgres cluster. -1 = no limit min_instances: -1 # min number of instances in Postgres cluster. 
-1 = no limit @@ -41,7 +49,7 @@ configGeneral: # example: "exampleimage:exampletag" # number of routines the operator spawns to process requests concurrently - workers: 4 + workers: 8 # parameters describing Postgres users configUsers: @@ -63,6 +71,17 @@ configKubernetes: # keya: valuea # keyb: valueb + # key name for annotation that compares manifest value with current date + # delete_annotation_date_key: "delete-date" + + # key name for annotation that compares manifest value with cluster name + # delete_annotation_name_key: "delete-clustername" + + # list of annotations propagated from cluster manifest to statefulset and deployment + # downscaler_annotations: + # - deployment-time + # - downscaler/* + # enables initContainers to run actions before Spilo is started enable_init_containers: true # toggles pod anti affinity on the Postgres pods @@ -71,10 +90,14 @@ configKubernetes: enable_pod_disruption_budget: true # enables sidecar containers to run alongside Spilo in the same pod enable_sidecars: true - # name of the secret containing infrastructure roles names and passwords + # namespaced name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles - # list of labels that can be inherited from the cluster manifest + # list of annotation keys that can be inherited from the cluster manifest + # inherited_annotations: + # - owned-by + + # list of label keys that can be inherited from the cluster manifest # inherited_labels: # - application # - environment @@ -86,15 +109,17 @@ configKubernetes: # node_readiness_label: # status: ready - # name of the secret containing the OAuth2 token to pass to the teams API + # namespaced name of the secret containing the OAuth2 token to pass to the teams API # oauth_token_secret_name: postgresql-operator # defines the template for PDB (Pod Disruption Budget) names pdb_name_format: "postgres-{cluster}-pdb" # override topology key for pod anti affinity pod_antiaffinity_topology_key: "kubernetes.io/hostname" - # name of the ConfigMap with environment variables to populate on every pod - # pod_environment_configmap: "" + # namespaced name of the ConfigMap with environment variables to populate on every pod + # pod_environment_configmap: "default/my-custom-config" + # name of the Secret (in cluster namespace) with environment variables to populate on every pod + # pod_environment_secret: "my-custom-secret" # specify the pod management policy of stateful sets of Postgres clusters pod_management_policy: "ordered_ready" @@ -103,8 +128,6 @@ configKubernetes: # service account definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_definition: "" - # name of service account to be used by postgres cluster pods - pod_service_account_name: "postgres-pod" # role definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_role_definition: "" @@ -115,11 +138,16 @@ configKubernetes: pod_terminate_grace_period: 5m # template for database user secrets generated by the operator secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + # set user and group for the spilo container (required to run Spilo as non-root process) + # spilo_runasuser: "101" + # spilo_runasgroup: "103" # group ID with write-access to volumes (required to run Spilo as non-root process) # spilo_fsgroup: 103 # whether the Spilo container should run in privileged mode spilo_privileged: false + # storage resize strategy, available options are: 
ebs, pvc, off + storage_resize_mode: pvc # operator watches for postgres objects in the given namespace watched_namespace: "*" # listen to all namespaces @@ -166,6 +194,8 @@ configLoadBalancer: enable_master_load_balancer: false # toggles service type load balancer pointing to the replica pod of the cluster enable_replica_load_balancer: false + # define external traffic policy for the load balancer + external_traffic_policy: "Cluster" # defines the DNS name string template for the master load balancer cluster master_dns_name_format: "{cluster}.{team}.{hostedzone}" # defines the DNS name string template for the replica load balancer cluster @@ -198,19 +228,37 @@ configAwsOrGcp: # AWS region used to store ESB volumes aws_region: eu-central-1 + # enable automatic migration on AWS from gp2 to gp3 volumes + enable_ebs_gp3_migration: false + # defines maximum volume size in GB until which auto migration happens + # enable_ebs_gp3_migration_max_size: 1000 + + # GCP credentials that will be used by the operator / pods + # gcp_credentials: "" + # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods # kube_iam_role: "" # S3 bucket to use for shipping postgres daily logs # log_s3_bucket: "" + # GCS bucket to use for shipping WAL segments with WAL-E + # wal_gs_bucket: "" + # S3 bucket to use for shipping WAL segments with WAL-E # wal_s3_bucket: "" # configure K8s cron job managed by the operator configLogicalBackup: # image for pods of the logical backup job (example runs pg_dumpall) - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + # path of google cloud service account json file + # logical_backup_google_application_credentials: "" + + # prefix for the backup job name + logical_backup_job_prefix: "logical-backup-" + # storage provider - either "s3" or "gcs" + logical_backup_provider: "s3" # S3 Access Key ID logical_backup_s3_access_key_id: "" # S3 bucket to store backup results @@ -221,7 +269,7 @@ configLogicalBackup: logical_backup_s3_endpoint: "" # S3 Secret Access Key logical_backup_s3_secret_access_key: "" - # S3 server side encription + # S3 server side encryption logical_backup_s3_sse: "AES256" # backup schedule in the cron format logical_backup_schedule: "30 00 * * *" @@ -231,6 +279,11 @@ configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests # enable_admin_role_for_users: true + # operator watches for PostgresTeam CRs to assign additional teams and members to clusters + enable_postgres_team_crd: false + # toogle to create additional superuser teams from PostgresTeam CRs + # enable_postgres_team_crd_superusers: false + # toggle to grant superuser to team members created from the Teams API enable_team_superuser: false # toggles usage of the Teams API by the operator @@ -255,22 +308,24 @@ configTeamsApi: # URL of the Teams API service # teams_api_url: http://fake-teams-api.default.svc.cluster.local -# Scalyr is a log management tool that Zalando uses as a sidecar -configScalyr: - # API key for the Scalyr sidecar - # scalyr_api_key: "" - - # Docker image for the Scalyr sidecar - # scalyr_image: "" - - # CPU limit value for the Scalyr sidecar - scalyr_cpu_limit: "1" - # CPU rquest value for the Scalyr sidecar - scalyr_cpu_request: 100m - # Memory limit value for the Scalyr sidecar - scalyr_memory_limit: 500Mi - # Memory request value for the Scalyr sidecar - scalyr_memory_request: 50Mi 
+configConnectionPooler: + # db schema to install lookup function into + connection_pooler_schema: "pooler" + # db user for pooler to use + connection_pooler_user: "pooler" + # docker image + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9" + # max db connections the pooler should hold + connection_pooler_max_db_connections: 60 + # default pooling mode + connection_pooler_mode: "transaction" + # number of pooler instances + connection_pooler_number_of_instances: 2 + # default resources + connection_pooler_default_cpu_request: 500m + connection_pooler_default_memory_request: 100Mi + connection_pooler_default_cpu_limit: "1" + connection_pooler_default_memory_limit: 100Mi rbac: # Specifies whether RBAC resources should be created @@ -278,6 +333,7 @@ rbac: crd: # Specifies whether custom resource definitions should be created + # When using helm3, this is ignored; instead use "--skip-crds" to skip. create: true serviceAccount: @@ -287,8 +343,17 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: +podServiceAccount: + # The name of the ServiceAccount to be used by postgres cluster pods + # If not set a name is generated using the fullname template and "-pod" suffix + name: "postgres-pod" + +# priority class for operator pod priorityClassName: "" +# priority class for database pods +podPriorityClassName: "" + resources: limits: cpu: 500m @@ -297,14 +362,29 @@ resources: cpu: 100m memory: 250Mi +securityContext: + runAsUser: 1000 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] -# Node labels for pod assignment -# Ref: https://kubernetes.io/docs/user-guide/node-selection/ -nodeSelector: {} +controllerID: + # Specifies whether a controller ID should be defined for the operator + # Note, all postgres manifest must then contain the following annotation to be found by this operator + # "acid.zalan.do/controller": + create: false + # The name of the controller ID to use. + # If not set and create is true, a name is generated using the fullname template + name: diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 5b077c9ab..330c4a04d 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.4.0 + tag: v1.6.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. 
@@ -15,16 +15,27 @@ podLabels: {} configTarget: "ConfigMap" +# JSON logging format +enableJsonLogging: false + # general configuration parameters configGeneral: # choose if deployment creates/updates CRDs with OpenAPIV3Validation enable_crd_validation: "true" + # update only the statefulsets without immediately doing the rolling update + enable_lazy_spilo_upgrade: "false" + # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION + enable_pgversion_env_var: "true" # start any new database pod without limitations on shm memory enable_shm_volume: "true" + # enables backwards compatible path between Spilo 12 and Spilo 13 images + enable_spilo_wal_path_compat: "false" # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" + # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s) + # kubernetes_use_configmaps: "false" # Spilo docker image - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # max number of instances in Postgres cluster. -1 = no limit min_instances: "-1" # min number of instances in Postgres cluster. -1 = no limit @@ -40,7 +51,7 @@ configGeneral: # sidecar_docker_images: "" # number of routines the operator spawns to process requests concurrently - workers: "4" + workers: "8" # parameters describing Postgres users configUsers: @@ -59,6 +70,15 @@ configKubernetes: # annotations attached to each database pod # custom_pod_annotations: "keya:valuea,keyb:valueb" + # key name for annotation that compares manifest value with current date + # delete_annotation_date_key: "delete-date" + + # key name for annotation that compares manifest value with cluster name + # delete_annotation_name_key: "delete-clustername" + + # list of annotations propagated from cluster manifest to statefulset and deployment + # downscaler_annotations: "deployment-time,downscaler/*" + # enables initContainers to run actions before Spilo is started enable_init_containers: "true" # toggles pod anti affinity on the Postgres pods @@ -67,10 +87,13 @@ configKubernetes: enable_pod_disruption_budget: "true" # enables sidecar containers to run alongside Spilo in the same pod enable_sidecars: "true" - # name of the secret containing infrastructure roles names and passwords + # namespaced name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles - # list of labels that can be inherited from the cluster manifest + # list of annotation keys that can be inherited from the cluster manifest + # inherited_annotations: owned-by + + # list of label keys that can be inherited from the cluster manifest # inherited_labels: application,environment # timeout for successful migration of master pods from unschedulable node @@ -79,15 +102,17 @@ configKubernetes: # set of labels that a running and active node should possess to be considered ready # node_readiness_label: "" - # name of the secret containing the OAuth2 token to pass to the teams API + # namespaced name of the secret containing the OAuth2 token to pass to the teams API # oauth_token_secret_name: postgresql-operator # defines the template for PDB (Pod Disruption Budget) names pdb_name_format: "postgres-{cluster}-pdb" # override topology key for pod anti affinity pod_antiaffinity_topology_key: "kubernetes.io/hostname" - # name of the ConfigMap with environment variables to populate on every pod - # 
pod_environment_configmap: "" + # namespaced name of the ConfigMap with environment variables to populate on every pod + # pod_environment_configmap: "default/my-custom-config" + # name of the Secret (in cluster namespace) with environment variables to populate on every pod + # pod_environment_secret: "my-custom-secret" # specify the pod management policy of stateful sets of Postgres clusters pod_management_policy: "ordered_ready" @@ -96,8 +121,6 @@ configKubernetes: # service account definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_definition: "" - # name of service account to be used by postgres cluster pods - pod_service_account_name: "postgres-pod" # role definition as JSON/YAML string to be used by postgres cluster pods # pod_service_account_role_definition: "" @@ -107,12 +130,17 @@ configKubernetes: # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator - secret_name_template: '{username}.{cluster}.credentials' + secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + # set user and group for the spilo container (required to run Spilo as non-root process) + # spilo_runasuser: "101" + # spilo_runasgroup: "103" # group ID with write-access to volumes (required to run Spilo as non-root process) # spilo_fsgroup: "103" # whether the Spilo container should run in privileged mode spilo_privileged: "false" + # storage resize strategy, available options are: ebs, pvc, off + storage_resize_mode: pvc # operator watches for postgres objects in the given namespace watched_namespace: "*" # listen to all namespaces @@ -157,6 +185,8 @@ configLoadBalancer: enable_master_load_balancer: "false" # toggles service type load balancer pointing to the replica pod of the cluster enable_replica_load_balancer: "false" + # define external traffic policy for the load balancer + external_traffic_policy: "Cluster" # defines the DNS name string template for the master load balancer cluster master_dns_name_format: '{cluster}.{team}.{hostedzone}' # defines the DNS name string template for the replica load balancer cluster @@ -189,6 +219,14 @@ configAwsOrGcp: # AWS region used to store ESB volumes aws_region: eu-central-1 + # enable automatic migration on AWS from gp2 to gp3 volumes + enable_ebs_gp3_migration: "false" + # defines maximum volume size in GB until which auto migration happens + # enable_ebs_gp3_migration_max_size: "1000" + + # GCP credentials for setting the GOOGLE_APPLICATION_CREDNETIALS environment variable + # gcp_credentials: "" + # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods # kube_iam_role: "" @@ -198,30 +236,46 @@ configAwsOrGcp: # S3 bucket to use for shipping WAL segments with WAL-E # wal_s3_bucket: "" + # GCS bucket to use for shipping WAL segments with WAL-E + # wal_gs_bucket: "" + # configure K8s cron job managed by the operator configLogicalBackup: # image for pods of the logical backup job (example runs pg_dumpall) - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + # path of google cloud service account json file + # logical_backup_google_application_credentials: "" + + # prefix for the backup job name + logical_backup_job_prefix: "logical-backup-" + # storage provider - either "s3" or "gcs" + logical_backup_provider: "s3" # S3 Access Key ID logical_backup_s3_access_key_id: 
"" # S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" - # S3 region of bucket - logical_backup_s3_region: "" # S3 endpoint url when not using AWS logical_backup_s3_endpoint: "" + # S3 region of bucket + logical_backup_s3_region: "" # S3 Secret Access Key logical_backup_s3_secret_access_key: "" - # S3 server side encription + # S3 server side encryption logical_backup_s3_sse: "AES256" # backup schedule in the cron format logical_backup_schedule: "30 00 * * *" + # automate creation of human users with teams API service configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests # enable_admin_role_for_users: "true" + # operator watches for PostgresTeam CRs to assign additional teams and members to clusters + enable_postgres_team_crd: "false" + # toogle to create additional superuser teams from PostgresTeam CRs + # enable_postgres_team_crd_superusers: "false" + # toggle to grant superuser to team members created from the Teams API # enable_team_superuser: "false" @@ -248,12 +302,33 @@ configTeamsApi: # URL of the Teams API service # teams_api_url: http://fake-teams-api.default.svc.cluster.local +# configure connection pooler deployment created by the operator +configConnectionPooler: + # db schema to install lookup function into + connection_pooler_schema: "pooler" + # db user for pooler to use + connection_pooler_user: "pooler" + # docker image + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9" + # max db connections the pooler should hold + connection_pooler_max_db_connections: "60" + # default pooling mode + connection_pooler_mode: "transaction" + # number of pooler instances + connection_pooler_number_of_instances: "2" + # default resources + connection_pooler_default_cpu_request: 500m + connection_pooler_default_memory_request: 100Mi + connection_pooler_default_cpu_limit: "1" + connection_pooler_default_memory_limit: 100Mi + rbac: # Specifies whether RBAC resources should be created create: true crd: # Specifies whether custom resource definitions should be created + # When using helm3, this is ignored; instead use "--skip-crds" to skip. 
create: true serviceAccount: @@ -263,8 +338,17 @@ serviceAccount: # If not set and create is true, a name is generated using the fullname template name: +podServiceAccount: + # The name of the ServiceAccount to be used by postgres cluster pods + # If not set a name is generated using the fullname template and "-pod" suffix + name: "postgres-pod" + +# priority class for operator pod priorityClassName: "" +# priority class for database pods +podPriorityClassName: "" + resources: limits: cpu: 500m @@ -273,14 +357,29 @@ resources: cpu: 100m memory: 250Mi +securityContext: + runAsUser: 1000 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + # Tolerations for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] -# Node labels for pod assignment -# Ref: https://kubernetes.io/docs/user-guide/node-selection/ -nodeSelector: {} +controllerID: + # Specifies whether a controller ID should be defined for the operator + # Note, all postgres manifest must then contain the following annotation to be found by this operator + # "acid.zalan.do/controller": + create: false + # The name of the controller ID to use. + # If not set and create is true, a name is generated using the fullname template + name: diff --git a/cmd/main.go b/cmd/main.go index 7fadd611a..376df0bad 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -2,7 +2,7 @@ package main import ( "flag" - "log" + log "github.com/sirupsen/logrus" "os" "os/signal" "sync" @@ -36,6 +36,8 @@ func init() { flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API") flag.Parse() + config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true" + configMapRawName := os.Getenv("CONFIG_MAP_NAME") if configMapRawName != "" { @@ -63,6 +65,9 @@ func init() { func main() { var err error + if config.EnableJsonLogging { + log.SetFormatter(&log.JSONFormatter{}) + } log.SetOutput(os.Stdout) log.Printf("Spilo operator %s\n", version) @@ -77,7 +82,7 @@ func main() { log.Fatalf("couldn't get REST config: %v", err) } - c := controller.NewController(&config) + c := controller.NewController(&config, "") c.Run(stop, wg) diff --git a/delivery.yaml b/delivery.yaml index 144448ea9..d0a689b8b 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -2,6 +2,10 @@ version: "2017-09-20" pipeline: - id: build-postgres-operator type: script + vm: large + cache: + paths: + - /go/pkg/mod commands: - desc: 'Update' cmd: | @@ -12,7 +16,7 @@ pipeline: - desc: 'Install go' cmd: | cd /tmp - wget -q https://storage.googleapis.com/golang/go1.12.linux-amd64.tar.gz -O go.tar.gz + wget -q https://storage.googleapis.com/golang/go1.15.6.linux-amd64.tar.gz -O go.tar.gz tar -xf go.tar.gz mv go /usr/local ln -s /usr/local/go/bin/go /usr/bin/go @@ -28,7 +32,7 @@ pipeline: IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test fi export IMAGE - make deps docker + make deps mocks docker - desc: 'Run unit tests' cmd: | export PATH=$PATH:$HOME/go/bin @@ -76,3 +80,15 @@ pipeline: export IMAGE make docker make push + + - id: build-logical-backup + type: script + + commands: + - desc: Build image + cmd: | + cd docker/logical-backup + export TAG=$(git describe --tags --always --dirty) + 
IMAGE="registry-write.opensource.zalan.do/acid/logical-backup" + docker build --rm -t "$IMAGE:$TAG$CDP_TAG" . + docker push "$IMAGE:$TAG$CDP_TAG" diff --git a/docker/DebugDockerfile b/docker/DebugDockerfile index 76dadf6df..e8f51badd 100644 --- a/docker/DebugDockerfile +++ b/docker/DebugDockerfile @@ -1,10 +1,19 @@ -FROM alpine -MAINTAINER Team ACID @ Zalando +FROM registry.opensource.zalan.do/library/alpine-3.12:latest +LABEL maintainer="Team ACID @ Zalando " # We need root certificates to deal with teams api over https RUN apk --no-cache add ca-certificates go git musl-dev -RUN go get github.com/derekparker/delve/cmd/dlv COPY build/* / -CMD ["/root/go/bin/dlv", "--listen=:7777", "--headless=true", "--api-version=2", "exec", "/postgres-operator"] +RUN addgroup -g 1000 pgo +RUN adduser -D -u 1000 -G pgo -g 'Postgres Operator' pgo + +RUN go get github.com/derekparker/delve/cmd/dlv +RUN cp /root/go/bin/dlv /dlv +RUN chown -R pgo:pgo /dlv + +USER pgo:pgo +RUN ls -l / + +CMD ["/dlv", "--listen=:7777", "--headless=true", "--api-version=2", "exec", "/postgres-operator"] diff --git a/docker/Dockerfile b/docker/Dockerfile index 520fd2d07..c1b87caf7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,8 @@ -FROM alpine -MAINTAINER Team ACID @ Zalando +FROM registry.opensource.zalan.do/library/alpine-3.12:latest +LABEL maintainer="Team ACID @ Zalando " # We need root certificates to deal with teams api over https +RUN apk --no-cache add curl RUN apk --no-cache add ca-certificates COPY build/* / diff --git a/docker/logical-backup/Dockerfile b/docker/logical-backup/Dockerfile index 94c524381..b84ea2b22 100644 --- a/docker/logical-backup/Dockerfile +++ b/docker/logical-backup/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:18.04 +FROM registry.opensource.zalan.do/library/ubuntu-18.04:latest LABEL maintainer="Team ACID @ Zalando " SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -13,12 +13,16 @@ RUN apt-get update \ curl \ jq \ gnupg \ + gcc \ + libffi-dev \ && pip3 install --no-cache-dir awscli --upgrade \ + && pip3 install --no-cache-dir gsutil --upgrade \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ && cat /etc/apt/sources.list.d/pgdg.list \ && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get update \ && apt-get install --no-install-recommends -y \ + postgresql-client-13 \ postgresql-client-12 \ postgresql-client-11 \ postgresql-client-10 \ diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 2d9a39e02..50f7e6e4c 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -46,6 +46,23 @@ function aws_upload { aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" } +function gcs_upload { + PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + + gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP" +} + +function upload { + case $LOGICAL_BACKUP_PROVIDER in + "gcs") + gcs_upload + ;; + *) + aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) + ;; + esac +} + function get_pods { declare -r SELECTOR="$1" @@ -93,7 +110,7 @@ for search in "${search_strategy[@]}"; do done set -x -dump | compress | aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) +dump | compress | upload [[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 )) set +x diff --git 
a/docs/administrator.md b/docs/administrator.md index 0cc4cb279..520f9b129 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -11,17 +11,29 @@ switchover (planned failover) of the master to the Pod with new minor version. The switch should usually take less than 5 seconds, still clients have to reconnect. -Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster). -The new cluster manifest must have a higher `version` string than the source -cluster and will be created from a basebackup. Depending of the cluster size, -downtime in this case can be significant as writes to the database should be -stopped and all WAL files should be archived first before cloning is started. +Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster) +or in-place. -Note, that simply changing the version string in the `postgresql` manifest does -not work at present and leads to errors. Neither Patroni nor Postgres Operator -can do in place `pg_upgrade`. Still, it can be executed manually in the Postgres -container, which is tricky (i.e. systems need to be stopped, replicas have to be -synced) but of course faster than cloning. +With cloning, the new cluster manifest must have a higher `version` string than +the source cluster and will be created from a basebackup. Depending on the +cluster size, downtime in this case can be significant as writes to the database +should be stopped and all WAL files should be archived first before cloning is +started. + +Starting with Spilo 13, Postgres Operator can do an in-place major version upgrade, +which should be faster than cloning. However, it is not fully automatic yet. +First, make sure that setting the `PGVERSION` environment variable +is enabled in the configuration. Since `v1.6.0`, `enable_pgversion_env_var` is +enabled by default. + +To trigger the upgrade, increase the version in the cluster manifest. After +Pods are rotated, `configure_spilo` will notice the version mismatch and start +the old version again. You can then exec into the Postgres container of the +master instance and call `python3 /scripts/inplace_upgrade.py N` where `N` +is the number of members of your cluster (see [`numberOfInstances`](https://github.com/zalando/postgres-operator/blob/50cb5898ea715a1db7e634de928b2d16dc8cd969/manifests/minimal-postgres-manifest.yaml#L10)). +The upgrade is usually fast, well under one minute for most DBs. Note that +changes become irreversible once `pg_upgrade` is called. To understand the +upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
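+
+As a minimal sketch, the manual step could look like this (the cluster name
+`demo-cluster`, the member count of `2` and the `cluster-name`/`spilo-role`
+label keys are assumptions based on the operator defaults; adjust them to your
+setup):
+
+```bash
+# find the Pod currently acting as Postgres master for the cluster
+MASTER_POD=$(kubectl get pods -l cluster-name=demo-cluster,spilo-role=master -o name)
+
+# run the upgrade script inside the Postgres container as the postgres user,
+# passing the number of cluster members (here: 2)
+kubectl exec -it "$MASTER_POD" -- su postgres -c "python3 /scripts/inplace_upgrade.py 2"
+```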
## CRD Validation @@ -44,7 +56,7 @@ Once the validation is enabled it can only be disabled manually by editing or patching the CRD manifest: ```bash -zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}' +kubectl patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}' ``` ## Non-default cluster domain @@ -95,6 +107,96 @@ lacks access rights to any of them (except K8s system namespaces like 'list pods' execute at the cluster scope and fail at the first violation of access rights. +## Operators with defined ownership of certain Postgres clusters + +By default, multiple operators can only run together in one K8s cluster when +isolated into their [own namespaces](administrator.md#specify-the-namespace-to-watch). +But, it is also possible to define ownership between operator instances and +Postgres clusters running all in the same namespace or K8s cluster without +interfering. + +First, define the [`CONTROLLER_ID`](../../manifests/postgres-operator.yaml#L38) +environment variable in the operator deployment manifest. Then specify the ID +in every Postgres cluster manifest you want this operator to watch using the +`"acid.zalan.do/controller"` annotation: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: demo-cluster + annotations: + "acid.zalan.do/controller": "second-operator" +spec: + ... +``` + +Every other Postgres cluster which lacks the annotation will be ignored by this +operator. Conversely, operators without a defined `CONTROLLER_ID` will ignore +clusters with defined ownership of another operator. + +## Delete protection via annotations + +To avoid accidental deletes of Postgres clusters, the operator can check the +manifest for two existing annotations containing the cluster name and/or the +current date (in YYYY-MM-DD format). The name of the annotation keys can be +defined in the configuration. By default, they are not set, which disables the +delete protection. Thus, one could choose to only go with one annotation. + +**postgres-operator ConfigMap** + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-operator +data: + delete_annotation_date_key: "delete-date" + delete_annotation_name_key: "delete-clustername" +``` + +**OperatorConfiguration** + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: OperatorConfiguration +metadata: + name: postgresql-operator-configuration +configuration: + kubernetes: + delete_annotation_date_key: "delete-date" + delete_annotation_name_key: "delete-clustername" +``` + +Now, every cluster manifest must contain the configured annotation keys to +trigger the delete process when running `kubectl delete pg`. Note that the +`Postgresql` resource would still get deleted as K8s' API server does not +block it. Only the operator logs will tell that the delete criteria were not +met. + +**cluster manifest** + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql +metadata: + name: demo-cluster + annotations: + delete-date: "2020-08-31" + delete-clustername: "demo-cluster" +spec: + ... +``` + +In case the resource has been deleted accidentally or the annotations were +simply forgotten, it's safe to recreate the cluster with `kubectl create`. +Existing Postgres clusters are not replaced by the operator. But, as the +original cluster still exists, the status will show `CreateFailed` at first. +On the next sync event it should change to `Running`. However, as it is in +fact a new resource for K8s, the UID will differ which can trigger a rolling +update of the pods because the UID is used as part of the backup path to S3. + + ## Role-based access control for the operator The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml) @@ -292,13 +394,21 @@ spec: ## Custom Pod Environment Variables +It is possible to configure a ConfigMap as well as a Secret which are used by the Postgres pods as +an additional provider for environment variables. One use case is to customize +the Spilo image and configure it with environment variables. Another case could be to provide custom +cloud provider or backup settings. -It is possible to configure a ConfigMap which is used by the Postgres pods as -an additional provider for environment variables.
+In general, the Operator will give preference to the globally configured variables, so that the custom +ones do not interfere with core functionality. Variables with the 'WAL_' and 'LOG_' prefixes can be overwritten though, to allow +backup and log shipping to be specified differently. -One use case is to customize the Spilo image and configure it with environment -variables. The ConfigMap with the additional settings is configured in the -operator's main ConfigMap: + +### Via ConfigMap +The ConfigMap with the additional settings is referenced in the operator's main configuration. +A namespace can be specified along with the name. If left out, the configured +default namespace of your K8s client will be used and if the ConfigMap is not +found there, the Postgres cluster's namespace is taken when different: **postgres-operator ConfigMap** @@ -309,7 +419,7 @@ metadata: name: postgres-operator data: # referencing config map with custom settings - pod_environment_configmap: postgres-pod-config + pod_environment_configmap: default/postgres-pod-config ``` **OperatorConfiguration** @@ -322,7 +432,7 @@ metadata: configuration: kubernetes: # referencing config map with custom settings - pod_environment_configmap: postgres-pod-config + pod_environment_configmap: default/postgres-pod-config ``` **referenced ConfigMap `postgres-pod-config`** @@ -337,7 +447,54 @@ data: MY_CUSTOM_VAR: value ``` -This ConfigMap is then added as a source of environment variables to the +The key-value pairs of the ConfigMap are then added as environment variables to the +Postgres StatefulSet/pods. + + +### Via Secret +The Secret with the additional variables is referenced in the operator's main configuration. +To protect the values of the secret from being exposed in the pod spec, they are each referenced +as SecretKeyRef. +This does not allow the secret to be in a different namespace than the pods, though. + +**postgres-operator ConfigMap** + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-operator +data: + # referencing secret with custom environment variables + pod_environment_secret: postgres-pod-secrets +``` + +**OperatorConfiguration** + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: OperatorConfiguration +metadata: + name: postgresql-operator-configuration +configuration: + kubernetes: + # referencing secret with custom environment variables + pod_environment_secret: postgres-pod-secrets +``` + +**referenced Secret `postgres-pod-secrets`** + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-pod-secrets + namespace: default +data: + MY_CUSTOM_VAR: dmFsdWU= +``` + +The key-value pairs of the Secret are all accessible as environment variables to the Postgres StatefulSet/pods. ## Limiting the number of min and max instances in clusters @@ -417,9 +574,12 @@ database. * **Human users** originate from the [Teams API](user.md#teams-api-roles) that returns a list of the team members given a team id. The operator differentiates between (a) product teams that own a particular Postgres cluster and are granted -admin rights to maintain it, and (b) Postgres superuser teams that get the -superuser access to all Postgres databases running in a K8s cluster for the -purposes of maintaining and troubleshooting. +admin rights to maintain it, (b) Postgres superuser teams that get superuser +access to all Postgres databases running in a K8s cluster for the purposes of +maintaining and troubleshooting, and (c) additional teams, superuser teams or +members associated with the owning team. The latter is managed via the +[PostgresTeam CRD](user.md#additional-teams-and-members-per-cluster).
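+
+As a hypothetical sketch (team and user names are made up; the field names
+follow the new `postgresteams.acid.zalan.do` CRD added in this change), such a
+resource could look like this:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: PostgresTeam
+metadata:
+  name: custom-team-membership
+spec:
+  # grant superuser access to all members of the "postgres_admins" team
+  # in clusters owned by team "acid"
+  additionalSuperuserTeams:
+    acid:
+    - "postgres_admins"
+  # also create roles for members of the "database_users" team
+  additionalTeams:
+    acid:
+    - "database_users"
+  # add an individual user to clusters owned by team "acid"
+  additionalMembers:
+    acid:
+    - "elephant"
+```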
+ + ## Understanding rolling update of Spilo pods @@ -430,6 +590,17 @@ from numerous escape characters in the latter log entry, view it in CLI with `PodTemplate` used by the operator is yet to be updated with the default values used internally in K8s. +The operator also supports lazy updates of the Spilo image. That means the pod +template of a PG cluster's stateful set is updated immediately with the new +image, but no rolling update follows. This feature saves you a switchover - and +hence downtime - when you know pods are re-started later anyway, for instance +due to the node rotation. To force a rolling update, disable this mode by +setting the `enable_lazy_spilo_upgrade` to `false` in the operator configuration +and restart the operator pod. With the standard eager rolling updates, the +operator checks during each Sync that all pods run the images specified in their respective +statefulsets. The operator triggers a rolling upgrade for PG clusters that +violate this condition. + ## Logical backups The operator can manage K8s cron jobs to run logical backups of Postgres @@ -479,6 +650,110 @@ A secret can be pre-provisioned in different ways: * Automatically provisioned via a custom K8s controller like [kube-aws-iam-controller](https://github.com/mikkeloscar/kube-aws-iam-controller) +## Google Cloud Platform setup + +To configure the operator on GCP, some prerequisites are needed: + +* A service account with the proper IAM setup to access the GCS bucket for the WAL-E logs +* The credentials file for the service account. + +The configuration parameters that we will be using are: + +* `additional_secret_mount` +* `additional_secret_mount_path` +* `gcp_credentials` +* `wal_gs_bucket` + +### Generate a K8s secret resource + +Generate the K8s secret resource that will contain your service account's +credentials. It's highly recommended to use a service account and limit its +scope to just the WAL-E bucket. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: psql-wale-creds + namespace: default +type: Opaque +stringData: + key.json: |- + +``` + +### Setup your operator configuration values + +With the `psql-wale-creds` resource applied to your cluster, ensure that +the operator's configuration is set up like the following: + +```yml +... +aws_or_gcp: + additional_secret_mount: "psql-wale-creds" + additional_secret_mount_path: "/var/secrets/google" # or wherever you want to mount the file + # aws_region: eu-central-1 + # kube_iam_role: "" + # log_s3_bucket: "" + # wal_s3_bucket: "" + wal_gs_bucket: "postgres-backups-bucket-28302F2" # name of the bucket where the WAL-E logs are saved + gcp_credentials: "/var/secrets/google/key.json" # combination of the mount path & key in the K8s resource. (i.e. key.json) +... +``` + +### Setup pod environment configmap + +To make postgres-operator work with GCS, use the following configmap: +```yml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pod-env-overrides + namespace: postgres-operator-system +data: + # Any env variable used by spilo can be added + USE_WALG_BACKUP: "true" + USE_WALG_RESTORE: "true" + CLONE_USE_WALG_RESTORE: "true" +``` +This configmap will instruct the operator to use WAL-G, instead of WAL-E, for backup and restore. + +Then provide this configmap in postgres-operator settings: +```yml +...
+# namespaced name of the ConfigMap with environment variables to populate on every pod +pod_environment_configmap: "postgres-operator-system/pod-env-overrides" +... +``` + + +## Sidecars for Postgres clusters + +A list of sidecars is added to each cluster created by the operator. The default +is empty. + +```yaml +kind: OperatorConfiguration +configuration: + sidecars: + - image: image:123 + name: global-sidecar + ports: + - containerPort: 80 + volumeMounts: + - mountPath: /custom-pgdata-mountpoint + name: pgdata + - ... +``` + +In addition to any environment variables you specify, the following environment +variables are always passed to sidecars: + + - `POD_NAME` - field reference to `metadata.name` + - `POD_NAMESPACE` - field reference to `metadata.namespace` + - `POSTGRES_USER` - the superuser that can be used to connect to the database + - `POSTGRES_PASSWORD` - the password for the superuser + ## Setting up the Postgres Operator UI Since the v1.2 release the Postgres Operator is shipped with a browser-based diff --git a/docs/developer.md b/docs/developer.md index 6e0fc33c8..8ab1e60bc 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -235,11 +235,31 @@ Then you can for example check the Patroni logs: kubectl logs acid-minimal-cluster-0 ``` +## Unit tests with Mocks and K8s Fake API + +Whenever possible you should rely on leveraging proper mocks and K8s fake client that allows full fledged testing of K8s objects in your unit tests. + +To enable mocks, a code annotation is needed: +[Mock code gen annotation](https://github.com/zalando/postgres-operator/blob/master/pkg/util/volumes/volumes.go#L3) + +To generate mocks run: +```bash +make mocks +``` + +Examples for mocks can be found in: +[Example mock usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L248) + +Examples for fake K8s objects can be found in: +[Example fake K8s client usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L166) + ## End-to-end tests -The operator provides reference end-to-end tests (e2e) (as Docker image) to -ensure various infrastructure parts work smoothly together. Each e2e execution -tests a Postgres Operator image built from the current git branch. The test +The operator provides reference end-to-end (e2e) tests to +ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`. +The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build. + +Each e2e execution tests a Postgres Operator image built from the current git branch. The test runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/), utilizes provided manifest examples, and runs e2e tests contained in the `tests` folder. The K8s API client in the container connects to the `kind` cluster via @@ -284,7 +304,7 @@ manifest files: Postgres manifest parameters are defined in the [api package](../pkg/apis/acid.zalan.do/v1/postgresql_type.go). The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go). -Validation of CRD parameters is controlled in [crd.go](../pkg/apis/acid.zalan.do/v1/crds.go). 
+Validation of CRD parameters is controlled in [crds.go](../pkg/apis/acid.zalan.do/v1/crds.go). Please, reflect your changes in tests, for example in: * [config_test.go](../pkg/util/config/config_test.go) * [k8sres_test.go](../pkg/cluster/k8sres_test.go) diff --git a/docs/diagrams/neutral_operator.excalidraw b/docs/diagrams/neutral_operator.excalidraw new file mode 100644 index 000000000..f9e48aec1 --- /dev/null +++ b/docs/diagrams/neutral_operator.excalidraw @@ -0,0 +1,3499 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "HJnnb_r0hPUKoBEINbers", + "type": "ellipse", + "x": 273, + "y": 517.75, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 10898020, + "version": 258, + "versionNonce": 298931548, + "isDeleted": false + }, + { + "id": "tCDf1dMVyFkty_0jKAnZs", + "type": "line", + "x": 273, + "y": 531.75, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1834520924, + "version": 237, + "versionNonce": 1077299676, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "nA3ZdlWP2zjjNACKfYs-d", + "type": "line", + "x": 395, + "y": 532.75, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1434407004, + "version": 289, + "versionNonce": 789098076, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "vtgct6qIZTm4sYOD92wKg", + "type": "ellipse", + "x": 274, + "y": 602.75, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2141653860, + "version": 264, + "versionNonce": 1327137500, + "isDeleted": false + }, + { + "id": "mOLA3EYJz1RciiXTcNzKd", + "type": "text", + "x": 305, + "y": 654.25, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1191437788, + "version": 171, + "versionNonce": 640281700, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "LKNTYzb6pb0XqqRf2YNv9", + "type": "ellipse", + "x": 539, + "y": 523.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 223989476, + "version": 308, + "versionNonce": 453278684, + "isDeleted": false + }, + { + "id": "75R3P1ZFskWD8-1ssBxzK", + "type": "line", + "x": 539, + "y": 537.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + 
"roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1763311964, + "version": 287, + "versionNonce": 1651949540, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "RT5N8ktBKNNZFEGC5HrUk", + "type": "line", + "x": 663.5, + "y": 538.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1317425764, + "version": 340, + "versionNonce": 966842852, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "dfEllTQv2I7GjGNlxLtO7", + "type": "ellipse", + "x": 540, + "y": 608.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 25785820, + "version": 314, + "versionNonce": 886907748, + "isDeleted": false + }, + { + "id": "jsYpTmNMxbY44mytnrs1Q", + "type": "ellipse", + "x": 735, + "y": 521.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 197655268, + "version": 290, + "versionNonce": 1837380828, + "isDeleted": false + }, + { + "id": "D5XP-OpR0GnxMkHaVvFR2", + "type": "line", + "x": 735, + "y": 535.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1895077212, + "version": 269, + "versionNonce": 1135285988, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "GSsk0CDMtzw5RPe6jYwQG", + "type": "line", + "x": 857, + "y": 536.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 911146596, + "version": 325, + "versionNonce": 1902197084, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "MYgWwh6xIpAnWKGvUPxaR", + "type": "ellipse", + "x": 736, + "y": 606.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1492224476, + "version": 296, + "versionNonce": 997104228, + "isDeleted": false + }, + { + "id": "Mgil_EoL7vCEANAAQbeT9", + "type": "text", + "x": 220, + "y": 686.25, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 573888220, + "version": 166, + "versionNonce": 1814670812, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "yeRW34kgnJTZIZlScfwrn", + "type": "text", + "x": 523.5, + "y": 689.25, + "width": 165, + 
"height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1364782300, + "version": 245, + "versionNonce": 116764132, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "dyn57jMr5lf-PlaHz_aQC", + "type": "text", + "x": 579, + "y": 667.25, + "width": 44, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1855903580, + "version": 176, + "versionNonce": 1324323420, + "isDeleted": false, + "text": "pod-1", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "H3QMgY9OZFFTnObVeqsin", + "type": "text", + "x": 775, + "y": 659.25, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1891081700, + "version": 214, + "versionNonce": 122573156, + "isDeleted": false, + "text": "pod-2", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "xTZVls7LlMTme9sH-DYxP", + "type": "text", + "x": 720.5, + "y": 691.25, + "width": 165, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1213607260, + "version": 314, + "versionNonce": 411363036, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "02J48ELeakCl3ignRYIBB", + "type": "draw", + "x": 994, + "y": 889.75, + "width": 456, + "height": 202, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 83904484, + "version": 269, + "versionNonce": 1271301348, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 2, + -10 + ], + [ + 0, + -13 + ], + [ + 0, + -15 + ], + [ + -6, + -21 + ], + [ + -19, + -27 + ], + [ + -32, + -27 + ], + [ + -47, + -14 + ], + [ + -54, + -13 + ], + [ + -59, + -19 + ], + [ + -81, + -32 + ], + [ + -98, + -36 + ], + [ + -128, + -38 + ], + [ + -142, + -35 + ], + [ + -152, + -31 + ], + [ + -168, + -21 + ], + [ + -183, + -4 + ], + [ + -184, + -4 + ], + [ + -195, + -19 + ], + [ + -200, + -23 + ], + [ + -207, + -26 + ], + [ + -219, + -27 + ], + [ + -283, + -2 + ], + [ + -296, + 8 + ], + [ + -299, + 12 + ], + [ + -300, + 26 + ], + [ + -299, + 28 + ], + [ + -299, + 25 + ], + [ + -312, + 25 + ], + [ + -323, + 27 + ], + [ + -335, + 32 + ], + [ + -343, + 38 + ], + [ + -361, + 65 + ], + [ + -368, + 80 + ], + [ + -371, + 97 + ], + [ + -369, + 105 + ], + [ + -366, + 110 + ], + [ + -352, + 118 + ], + [ + -344, + 119 + ], + [ + -336, + 118 + ], + [ + -316, + 109 + ], + [ + -310, + 104 + ], + [ + -309, + 101 + ], + [ + -308, + 115 + ], + [ + -305, + 130 + ], + [ + -296, + 144 + ], + [ + -282, + 159 + ], + [ + -274, + 163 + ], + [ + -262, + 164 + ], + [ + -240, + 163 + ], + [ + -210, + 153 + ], + [ + -173, + 139 + ], + [ + -137, + 118 + ], + [ + -134, 
+ 115 + ], + [ + -129, + 121 + ], + [ + -114, + 144 + ], + [ + -98, + 154 + ], + [ + -86, + 157 + ], + [ + -61, + 157 + ], + [ + -36, + 153 + ], + [ + -16, + 147 + ], + [ + -8, + 143 + ], + [ + -6, + 136 + ], + [ + -5, + 112 + ], + [ + -6, + 106 + ], + [ + 3, + 119 + ], + [ + 8, + 122 + ], + [ + 17, + 124 + ], + [ + 26, + 123 + ], + [ + 57, + 111 + ], + [ + 74, + 100 + ], + [ + 80, + 92 + ], + [ + 83, + 77 + ], + [ + 76, + 57 + ], + [ + 69, + 43 + ], + [ + 83, + 31 + ], + [ + 85, + 23 + ], + [ + 73, + 3 + ], + [ + 67, + -5 + ], + [ + 47, + -15 + ], + [ + 25, + -12 + ], + [ + 0, + 0 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "VZp483EWEr7EJ_FlHJk0a", + "type": "text", + "x": 665.75, + "y": 931.5, + "width": 386, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 224990820, + "version": 195, + "versionNonce": 1620426212, + "isDeleted": false, + "text": "External Storage: S3, GCS", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "BrLh-5pM2Jhx-5_Vep5-G", + "type": "arrow", + "x": 393, + "y": 729.75, + "width": 262, + "height": 163, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 310475876, + "version": 138, + "versionNonce": 1240221796, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 262, + 163 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "UfPfV9MFcDCPYI-jf7seb", + "type": "text", + "x": 381.5, + "y": 814.75, + "width": 190, + "height": 25, + "angle": 0.45141580316417595, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [ + "3u9rzicVh0QO2xN6fmhVp" + ], + "seed": 1292594020, + "version": 326, + "versionNonce": 1344659420, + "isDeleted": false, + "text": "Nightly Basebackup", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "SmcG2YbgL-8v4clAUp0xh", + "type": "text", + "x": 346.75, + "y": 846.25, + "width": 225, + "height": 25, + "angle": 0.4312915734727083, + "strokeColor": "#000000", + "backgroundColor": "#fab005", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [ + "3u9rzicVh0QO2xN6fmhVp" + ], + "seed": 1782203356, + "version": 325, + "versionNonce": 361602020, + "isDeleted": false, + "text": "Write Ahead Log (WAL)", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "_Kyp73xUT4mahN8JdoGcS", + "type": "diamond", + "x": 277, + "y": 412.75, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 270461276, + "version": 139, + "versionNonce": 144016476, + "isDeleted": false + }, + { + "id": "Bmee_A3CCXFMs_Jo4fDwh", + "type": "diamond", + "x": 638, + "y": 404.75, + "width": 127, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1791880796, + 
"version": 172, + "versionNonce": 414744420, + "isDeleted": false + }, + { + "id": "AwIhDdIZFowEQWpatIS_J", + "type": "text", + "x": 265, + "y": 376.25, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 446241244, + "version": 143, + "versionNonce": 1285331164, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "zYWhLoCY5QQJkDhjKtZMM", + "type": "text", + "x": 623.5, + "y": 374.25, + "width": 149, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1243730268, + "version": 137, + "versionNonce": 1270543076, + "isDeleted": false, + "text": "Replica Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "IPdSpq4-kBLUQkvNDjyXi", + "type": "arrow", + "x": 334.5, + "y": 452.25, + "width": 4.965440128473574, + "height": 57.669431607425224, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1700066276, + "version": 127, + "versionNonce": 935235548, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -4.965440128473574, + 57.669431607425224 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "05IcxTgNq-2qmCLEERhJz", + "type": "arrow", + "x": 698, + "y": 448.75, + "width": 93, + "height": 59, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1235196764, + "version": 112, + "versionNonce": 1831007844, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -93, + 59 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "OzTNSlxHW3EHNyqPewfNh", + "type": "arrow", + "x": 705, + "y": 444.75, + "width": 90, + "height": 70, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1509319268, + "version": 124, + "versionNonce": 330562916, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 90, + 70 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "0ocAfgcF_lOPK9OGRgqVZ", + "type": "text", + "x": 947.75, + "y": 215, + "width": 300, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1404120284, + "version": 182, + "versionNonce": 1435274980, + "isDeleted": false, + "text": "K8s account/network boundary", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "MY4EMfFbYicpuS02Xsrz5", + "type": "rectangle", + "x": 306, + "y": 206.75, + "width": 91, + "height": 41, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1814514020, + "version": 128, + 
"versionNonce": 614182244, + "isDeleted": false + }, + { + "id": "ymXiakDGTbDBDdbumOMAM", + "type": "text", + "x": 276.5, + "y": 172.25, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 111950308, + "version": 140, + "versionNonce": 108395612, + "isDeleted": false, + "text": "Load Balancer", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "KbmHbQP5KiwSuGmdHvMPG", + "type": "rectangle", + "x": 655.25, + "y": 206.5, + "width": 91, + "height": 41, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 571247452, + "version": 180, + "versionNonce": 252207332, + "isDeleted": false + }, + { + "id": "EpO40F5rsuuBJFSu77u5n", + "type": "text", + "x": 625.75, + "y": 172, + "width": 143, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 995715172, + "version": 192, + "versionNonce": 438990044, + "isDeleted": false, + "text": "Load Balancer", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "JI02M9qfU4tMF5xh8ZxUg", + "type": "line", + "x": 408, + "y": 224.75, + "width": 238, + "height": 2, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1266200156, + "version": 141, + "versionNonce": 9610340, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 238, + 2 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "zTkeS6HMU9Ne-W4IQppVG", + "type": "line", + "x": 756, + "y": 225.75, + "width": 177, + "height": 0, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2023140068, + "version": 117, + "versionNonce": 44254172, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 177, + 0 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "UqDNg7adJeN1tJ7o6cuMM", + "type": "line", + "x": 299, + "y": 223.75, + "width": 172, + "height": 4, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1114792676, + "version": 124, + "versionNonce": 1594390500, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -172, + 4 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "zsE606bqp5qqQi7xyFI6R", + "type": "arrow", + "x": 343, + "y": 254.75, + "width": 4, + "height": 111, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 650104412, + "version": 137, + "versionNonce": 1304848476, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -4, + 111 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "yVLFLudgVoRUdAxFNJC_z", + "type": "arrow", + "x": 698, + "y": 
255.75, + "width": 0.8342433616519429, + "height": 119.78342345680107, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#4c6ef5", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 954257116, + "version": 131, + "versionNonce": 645389156, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0.8342433616519429, + 119.78342345680107 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "lM8YhhEb6z3bDEJuu4m3d", + "type": "rectangle", + "x": 1058, + "y": 285.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 837465444, + "version": 206, + "versionNonce": 34667740, + "isDeleted": false + }, + { + "id": "6vuRc6aOqhdoEuttAeKt2", + "type": "rectangle", + "x": 1044.75, + "y": 270.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1395971300, + "version": 187, + "versionNonce": 556715748, + "isDeleted": false + }, + { + "id": "-fAixshWpoLjWfYDN2XQn", + "type": "rectangle", + "x": 1074, + "y": 299.75, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 684139868, + "version": 244, + "versionNonce": 1762984284, + "isDeleted": false + }, + { + "id": "9ly5PAEyfbUB3QeBMxvhA", + "type": "arrow", + "x": 1034, + "y": 339.75, + "width": 249, + "height": 72, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 27470428, + "version": 132, + "versionNonce": 532370020, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -249, + 72 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "YhujSfYUqGuzRLIsXy9jA", + "type": "arrow", + "x": 1038, + "y": 287.75, + "width": 613, + "height": 95, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 726845916, + "version": 227, + "versionNonce": 1243903452, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -613, + 95 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "nGE3aIbk_78-jtgJnzjfR", + "type": "text", + "x": 773, + "y": 281.25, + "width": 138, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 43455324, + "version": 126, + "versionNonce": 820733412, + "isDeleted": false, + "text": "read &writes ", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "pMb4NsyES3HkiEmKW13wZ", + "type": "text", + "x": 910.5, + "y": 375.25, + "width": 89, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + 
"seed": 1546616164, + "version": 115, + "versionNonce": 844192348, + "isDeleted": false, + "text": "read only", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "CplzIegzDmsR0AWIsy2Nk", + "type": "ellipse", + "x": 1356.625, + "y": 684.25, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#868e96", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 617074276, + "version": 371, + "versionNonce": 1506609508, + "isDeleted": false + }, + { + "id": "FlifalHMUV7XU10a9nWsU", + "type": "line", + "x": 1356.625, + "y": 698.25, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1194179036, + "version": 335, + "versionNonce": 1013821148, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "ZML-phKfbEV-wWs8DNoCS", + "type": "line", + "x": 1478.625, + "y": 699.25, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1460167140, + "version": 387, + "versionNonce": 1303264484, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "sxeHgJbMHMUfe7pY2nz37", + "type": "ellipse", + "x": 1357.625, + "y": 769.25, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#868e96", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1167203932, + "version": 377, + "versionNonce": 1493922652, + "isDeleted": false + }, + { + "id": "XsWj_GN-Vna0UzrJZdvtD", + "type": "text", + "x": 1236.875, + "y": 475, + "width": 404, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 83159388, + "version": 129, + "versionNonce": 899524964, + "isDeleted": false, + "text": "Clone from External Storage", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "SR_Mx08J0VFGQwsyXJVBl", + "type": "diamond", + "x": 1365.375, + "y": 579.5, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1095611996, + "version": 189, + "versionNonce": 377268188, + "isDeleted": false + }, + { + "id": "11TagJjN0nNn4FtwrFBke", + "type": "text", + "x": 1353.375, + "y": 543, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 206859620, + "version": 186, + "versionNonce": 451367908, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "-JJEKxE4FMwgRCKN8xPNm", + "type": "arrow", + "x": 
1425.375, + "y": 621.5, + "width": 5, + "height": 48, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1669237468, + "version": 158, + "versionNonce": 1758678108, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -5, + 48 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "N5Rd_bvTstfjJ4_Fudzbh", + "type": "arrow", + "x": 1096.375, + "y": 888.75, + "width": 237.5, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1399054820, + "version": 73, + "versionNonce": 1910595804, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 237.5, + -90 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "v3KmUjaQRCHwnpY1QivMD", + "type": "text", + "x": 1085.125, + "y": 813.75, + "width": 205, + "height": 25, + "angle": 5.92323678115218, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1552100828, + "version": 184, + "versionNonce": 537307876, + "isDeleted": false, + "text": "Restore point in time", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "bp0xDgg7BJ_M54RKiISdr", + "type": "text", + "x": 1198.875, + "y": 290, + "width": 210, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 217533284, + "version": 62, + "versionNonce": 1228465500, + "isDeleted": false, + "text": "Your Application", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "6spCo6ScZkWngCoTAcCiW", + "type": "ellipse", + "x": 947.625, + "y": 1154.5, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 905272292, + "version": 401, + "versionNonce": 1143724508, + "isDeleted": false + }, + { + "id": "F6nmJkeYAfpVLmj-qHhol", + "type": "line", + "x": 947.625, + "y": 1168.5, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1401537628, + "version": 380, + "versionNonce": 1216102884, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "4ccPjDIHyolwYrRjePIZQ", + "type": "line", + "x": 1069.625, + "y": 1169.5, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1445478244, + "version": 432, + "versionNonce": 1036795484, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "7n9Va4aCv2frULofssYe1", + "type": "ellipse", + "x": 949.875, + "y": 1245.75, + "width": 121, + "height": 34, + "angle": 0, + 
"strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 789108956, + "version": 447, + "versionNonce": 48391524, + "isDeleted": false + }, + { + "id": "bwfIiq16JgZiB3KxNuUSE", + "type": "text", + "x": 979.625, + "y": 1291, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1160950500, + "version": 314, + "versionNonce": 1592529628, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "5cQrHFOQIIiWj1g6iz-0u", + "type": "text", + "x": 919.625, + "y": 1321.75, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1655790940, + "version": 331, + "versionNonce": 1673009380, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "r715B9Xh8NK3tgDrHa_ta", + "type": "diamond", + "x": 949.125, + "y": 1405.75, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1638053476, + "version": 297, + "versionNonce": 1713198940, + "isDeleted": false + }, + { + "id": "IUD9gKNFwwDGssTCQMivE", + "type": "text", + "x": 940.875, + "y": 1453, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 131420636, + "version": 301, + "versionNonce": 378231908, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "olLquFzP1bWExaxUnEtOC", + "type": "rectangle", + "x": 827.625, + "y": 1581.5, + "width": 357.50000000000006, + "height": 122.49999999999991, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 312740956, + "version": 279, + "versionNonce": 623521764, + "isDeleted": false + }, + { + "id": "2T8bRODpwVvJ_LB346Tys", + "type": "rectangle", + "x": 843.875, + "y": 1599, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 154310628, + "version": 158, + "versionNonce": 1035743324, + "isDeleted": false + }, + { + "id": "15CHgGbfgXivIS1LM9pn0", + "type": "rectangle", + "x": 956.25, + "y": 1597.25, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 259570396, + "version": 197, + "versionNonce": 799183716, + "isDeleted": false + }, + { + "id": 
"jH3mVWOPIu4Z23BhJup1k", + "type": "rectangle", + "x": 1070.75, + "y": 1597.75, + "width": 93.75, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 291171428, + "version": 182, + "versionNonce": 902364, + "isDeleted": false + }, + { + "id": "2NVzp0qsCAL-Wg6lDjMT1", + "type": "text", + "x": 1209.875, + "y": 1626.25, + "width": 318, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1993947100, + "version": 172, + "versionNonce": 608339684, + "isDeleted": false, + "text": "PGBouncer Deployment", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "MFTqi-5KJ9TBfh7uqNVkE", + "type": "arrow", + "x": 1003.3728133276104, + "y": 1572.2587801109535, + "width": 3.0021866723894846, + "height": 89.75878011095347, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 893334116, + "version": 134, + "versionNonce": 473483612, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 3.0021866723894846, + -89.75878011095347 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "H4Mnirn-bNpmp8P3r8e3l", + "type": "arrow", + "x": 1006.375, + "y": 1397.5, + "width": 1.25, + "height": 46.25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1660277988, + "version": 121, + "versionNonce": 2006452836, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -1.25, + -46.25 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "Ti-szwL_0r7pK6bVA0cpb", + "type": "rectangle", + "x": 791.3125, + "y": 1836.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1569973084, + "version": 397, + "versionNonce": 1972321756, + "isDeleted": false + }, + { + "id": "hrkuTxEdhUF4slXQn3d8p", + "type": "rectangle", + "x": 778.0625, + "y": 1821.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1620283492, + "version": 378, + "versionNonce": 1550917092, + "isDeleted": false + }, + { + "id": "F5w_L9hECvkc5DtRnBhAw", + "type": "rectangle", + "x": 807.3125, + "y": 1850.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1349982172, + "version": 435, + "versionNonce": 891495004, + "isDeleted": false + }, + { + "id": "0JIhGYPnVdHMjSdyBHj6E", + "type": "text", + "x": 1345.9375, + "y": 1919.25, + "width": 500, + "height": 35, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#15aabf", + "fillStyle": "cross-hatch", + "strokeWidth": 1, + "strokeStyle": 
"solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1745609700, + "version": 321, + "versionNonce": 2077999460, + "isDeleted": false, + "text": "Your Application at large pod counts", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "ZHbFN5wfBx1NalW6UbZeM", + "type": "rectangle", + "x": 976.5, + "y": 1841.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 349222500, + "version": 431, + "versionNonce": 298219228, + "isDeleted": false + }, + { + "id": "3PnSvfGnPGd5XNGLNcRBH", + "type": "rectangle", + "x": 963.25, + "y": 1826.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1720144348, + "version": 412, + "versionNonce": 757462244, + "isDeleted": false + }, + { + "id": "x83a6lJgB-m58TYSJr7m8", + "type": "rectangle", + "x": 992.5, + "y": 1855.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 632572388, + "version": 469, + "versionNonce": 403389276, + "isDeleted": false + }, + { + "id": "yM7IY0uff7LBVk-kR8nsa", + "type": "rectangle", + "x": 1164, + "y": 1840, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 649289700, + "version": 428, + "versionNonce": 769505380, + "isDeleted": false + }, + { + "id": "fNOgZlF9boua0Bgb1tpPt", + "type": "rectangle", + "x": 1150.75, + "y": 1825, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1486749788, + "version": 409, + "versionNonce": 1917022172, + "isDeleted": false + }, + { + "id": "paHFy_DHE_S0Pf7eoTgTF", + "type": "rectangle", + "x": 1180, + "y": 1854, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 142984036, + "version": 466, + "versionNonce": 1506872292, + "isDeleted": false + }, + { + "id": "of-4scEuTkHGGLqyZpKil", + "type": "rectangle", + "x": 915.25, + "y": 1956.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1682303844, + "version": 428, + "versionNonce": 813765724, + "isDeleted": false + }, + { + "id": "1k7q_Kt5-BRgsBAWNpyUz", + "type": "rectangle", + "x": 902, + "y": 1941.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1542188252, + "version": 
409, + "versionNonce": 1661857636, + "isDeleted": false + }, + { + "id": "0D93-v_7WWIol3r7Oedv9", + "type": "rectangle", + "x": 931.25, + "y": 1970.25, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 448454372, + "version": 466, + "versionNonce": 278276316, + "isDeleted": false + }, + { + "id": "i9XufVTPGOfxMO3Yr8pKR", + "type": "rectangle", + "x": 1107.75, + "y": 1957.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1850083044, + "version": 425, + "versionNonce": 165831396, + "isDeleted": false + }, + { + "id": "lsBHDW4ED1LbgRpMwVXqp", + "type": "rectangle", + "x": 1094.5, + "y": 1942.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 103631196, + "version": 406, + "versionNonce": 1175760220, + "isDeleted": false + }, + { + "id": "9Gv1xBz0PDtcIYrABxd-9", + "type": "rectangle", + "x": 1123.75, + "y": 1971.5, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 892045924, + "version": 463, + "versionNonce": 1423730276, + "isDeleted": false + }, + { + "id": "DpB7bLHnenCrJH0UrF2wE", + "type": "arrow", + "x": 818.875, + "y": 1809.25, + "width": 151.25, + "height": 92.5, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 600387420, + "version": 99, + "versionNonce": 1112449500, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 151.25, + -92.5 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "hhcSLd8LaNTFKrVkn7cpx", + "type": "arrow", + "x": 1011.375, + "y": 1814.25, + "width": 6.25, + "height": 98.75, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 475116516, + "version": 107, + "versionNonce": 1221196260, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -6.25, + -98.75 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "O-cD7TO6FEdMCeX8PHLiN", + "type": "arrow", + "x": 1196.375, + "y": 1811.75, + "width": 145, + "height": 90, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1661665244, + "version": 99, + "versionNonce": 2086012508, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -145, + -90 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "3v6ZCgcVGuyCcReUPeom4", + "type": "arrow", + "x": 1003.875, + "y": 1141.875, + "width": 40, + "height": 85, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + 
"opacity": 100, + "groupIds": [], + "seed": 658333276, + "version": 94, + "versionNonce": 549002596, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -40, + -85 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "Lqxtc4vXdq0aP4yP3vrbo", + "type": "text", + "x": 1307.375, + "y": 1155.461956521739, + "width": 49.250000000000036, + "height": 74.94565217391309, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 246194532, + "version": 192, + "versionNonce": 956095196, + "isDeleted": false, + "text": "...", + "fontSize": 59.68022440392712, + "fontFamily": 1, + "textAlign": "left", + "baseline": 53 + }, + { + "id": "MpgPzae1IIzHDBhZ2dAsQ", + "type": "ellipse", + "x": 1153.125, + "y": 1147.625, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 321763812, + "version": 437, + "versionNonce": 100126948, + "isDeleted": false + }, + { + "id": "jz6wcBLdv5F7JTNEPJoUG", + "type": "line", + "x": 1153.125, + "y": 1161.625, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1259226716, + "version": 416, + "versionNonce": 1389698908, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "QxnIa0hyHn9qELemjPAEE", + "type": "line", + "x": 1275.125, + "y": 1162.625, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1755635044, + "version": 468, + "versionNonce": 1209275492, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "87YsZ0WhoaM_0KQpdMPcy", + "type": "ellipse", + "x": 1154.125, + "y": 1232.625, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 112007900, + "version": 443, + "versionNonce": 36754396, + "isDeleted": false + }, + { + "id": "5uRjADtUCimVL1nnCSAMG", + "type": "text", + "x": 1137.625, + "y": 1313.625, + "width": 165, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1516998884, + "version": 374, + "versionNonce": 349956068, + "isDeleted": false, + "text": "spilo-role=replica", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "UdAXrAQCc8cnIf7FmuG9G", + "type": "text", + "x": 1193.125, + "y": 1291.625, + "width": 44, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 8216412, + "version": 305, + "versionNonce": 622983260, + "isDeleted": false, + "text": "pod-1", 
+ "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "iwGdpD-yA5HRQQQa5oObs", + "type": "ellipse", + "x": 414.5, + "y": 1190.625, + "width": 121, + "height": 32, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 863881572, + "version": 523, + "versionNonce": 811420516, + "isDeleted": false + }, + { + "id": "coxnHZBDXMLYv6wioEitM", + "type": "line", + "x": 414.5, + "y": 1204.625, + "width": 0, + "height": 91, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 589885148, + "version": 502, + "versionNonce": 125471964, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 91 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "cAt5dmyINZTv98xAyZP8b", + "type": "line", + "x": 536.5, + "y": 1205.625, + "width": 0, + "height": 89, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 860290276, + "version": 554, + "versionNonce": 753286884, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 0, + 89 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "3xYBhCRPLLXNqfYMftBXC", + "type": "ellipse", + "x": 416.75, + "y": 1281.875, + "width": 121, + "height": 34, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1180611420, + "version": 569, + "versionNonce": 718049628, + "isDeleted": false + }, + { + "id": "N0zhhdNd_2qdGLiaFXa5k", + "type": "text", + "x": 446.5, + "y": 1327.125, + "width": 56, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1520287844, + "version": 436, + "versionNonce": 587514468, + "isDeleted": false, + "text": "pod-0", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "1P4LkT1SjnEcULq5T-RN9", + "type": "text", + "x": 386.5, + "y": 1357.875, + "width": 166, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#ced4da", + "fillStyle": "solid", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2003298268, + "version": 456, + "versionNonce": 241829340, + "isDeleted": false, + "text": "spilo-role=master", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "mdHM232x-I25hYtSee4Ge", + "type": "diamond", + "x": 416, + "y": 1441.875, + "width": 112, + "height": 37, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 22025188, + "version": 419, + "versionNonce": 1770444260, + "isDeleted": false + }, + { + "id": "ZJlUU3rT957t1RdM5RInK", + "type": "text", + "x": 407.75, + "y": 1489.125, + "width": 146, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#228be6", + "fillStyle": "hachure", 
+ "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 173444188, + "version": 423, + "versionNonce": 2003102300, + "isDeleted": false, + "text": "Master Service", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "B1Qvi5HNMM_roG6ON0BQD", + "type": "arrow", + "x": 473.25, + "y": 1433.625, + "width": 1.25, + "height": 46.25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1433139044, + "version": 243, + "versionNonce": 1299872100, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -1.25, + -46.25 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "XDPXWDobzcFq4D7R2gqh4", + "type": "arrow", + "x": 692.625, + "y": 1046.125, + "width": 178.44955960164782, + "height": 137.8884688600433, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1705785180, + "version": 76, + "versionNonce": 948911836, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + -178.44955960164782, + 137.8884688600433 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "83GpNN-rHKKsQhs40OAmS", + "type": "text", + "x": 547.875, + "y": 1088.625, + "width": 97, + "height": 25, + "angle": 5.608444364956034, + "strokeColor": "#000000", + "backgroundColor": "#12b886", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 728359772, + "version": 135, + "versionNonce": 1244507364, + "isDeleted": false, + "text": "continuous", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + }, + { + "id": "0iYzRksu9bFd4FRhNS4vL", + "type": "rectangle", + "x": 356.375, + "y": 1152.3750000000002, + "width": 228.75000000000003, + "height": 381.2499999999999, + "angle": 0, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1912617828, + "version": 132, + "versionNonce": 2054882140, + "isDeleted": false + }, + { + "id": "_w-R0RLuSZGXtjYndbBuF", + "type": "text", + "x": 205.125, + "y": 1332.375, + "width": 225, + "height": 35, + "angle": 4.723624462642652, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "dashed", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 339311844, + "version": 143, + "versionNonce": 1093562468, + "isDeleted": false, + "text": "Standby Cluster", + "fontSize": 28, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "_MObJgJ_wonJNtPP1p6ZX", + "type": "rectangle", + "x": 430.25, + "y": 1656.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1745745252, + "version": 505, + "versionNonce": 2141117532, + "isDeleted": false + }, + { + "id": "ers2ZQYACj6DmohzS1V61", + "type": "rectangle", + "x": 417, + "y": 1641.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": 
"hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1610711772, + "version": 486, + "versionNonce": 1938859876, + "isDeleted": false + }, + { + "id": "a7YApsVVLIgOEWz-wRw-R", + "type": "rectangle", + "x": 446.25, + "y": 1670.625, + "width": 102, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "#fd7e14", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1022470372, + "version": 543, + "versionNonce": 1220210908, + "isDeleted": false + }, + { + "id": "-HPd9HbQfjaSSnyVtxzAI", + "type": "arrow", + "x": 468.1411898687482, + "y": 1625.0761543437839, + "width": 5.733810131251801, + "height": 83.95115434378386, + "angle": 0, + "strokeColor": "#5f3dc4", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1743894116, + "version": 51, + "versionNonce": 1766726372, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 5.733810131251801, + -83.95115434378386 + ] + ], + "lastCommittedPoint": null + }, + { + "id": "74_FKllXVIzRwRKskBVca", + "type": "text", + "x": 238.625, + "y": 1657.375, + "width": 143, + "height": 50, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 990652900, + "version": 76, + "versionNonce": 778073436, + "isDeleted": false, + "text": "Read only app\n(e.g. migration)", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 43 + }, + { + "id": "nhSbj-rOoBSaObbxkDC06", + "type": "text", + "x": -17.8215120805408, + "y": 418.7138510177598, + "width": 297.4346399775327, + "height": 35.408885711611035, + "angle": 4.728012709005166, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 107991652, + "version": 133, + "versionNonce": 1041309284, + "isDeleted": false, + "text": "Postgres Deployment", + "fontSize": 28.32710856928882, + "fontFamily": 1, + "textAlign": "left", + "baseline": 25 + }, + { + "id": "P6l-7fAr8MMc9KsB2kZw_", + "type": "text", + "x": 409.625, + "y": 30.375, + "width": 1036.2000000000005, + "height": 55, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 2073692900, + "version": 209, + "versionNonce": 1038182876, + "isDeleted": false, + "text": "Zalando Postgres Operator : Supported Setups", + "fontSize": 43.99999999999998, + "fontFamily": 1, + "textAlign": "left", + "baseline": 39 + }, + { + "id": "bZZTypDxRVen7t6-5jXqf", + "type": "arrow", + "x": 404.8103958096076, + "y": 598.9661255675087, + "width": 125.49452508728018, + "height": 1.4560642183292885, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1915235036, + "version": 95, + "versionNonce": 2115434724, + "isDeleted": false, + "points": [ + [ + 0, + 0 + ], + [ + 125.49452508728018, + -1.4560642183292885 + ] + ], + "lastCommittedPoint": null + }, + { + "id": 
"g1TuJyldd9lGKOVeuxdCc", + "type": "text", + "x": 424.125, + "y": 563.625, + "width": 67, + "height": 25, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "hachure", + "strokeWidth": 1, + "strokeStyle": "solid", + "roughness": 1, + "opacity": 100, + "groupIds": [], + "seed": 1359012196, + "version": 11, + "versionNonce": 173002332, + "isDeleted": false, + "text": "stream", + "fontSize": 20, + "fontFamily": 1, + "textAlign": "left", + "baseline": 18 + } + ], + "appState": { + "viewBackgroundColor": "#ffffff" + } +} \ No newline at end of file diff --git a/docs/diagrams/neutral_operator.png b/docs/diagrams/neutral_operator.png new file mode 100644 index 000000000..b8f807d63 Binary files /dev/null and b/docs/diagrams/neutral_operator.png differ diff --git a/docs/gsoc-2019/ideas.md b/docs/gsoc-2019/ideas.md deleted file mode 100644 index 456a5a0ff..000000000 --- a/docs/gsoc-2019/ideas.md +++ /dev/null @@ -1,63 +0,0 @@ -

# Google Summer of Code 2019

- -## Applications steps - -1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/) -2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application. -3. Select a project from the list of ideas below or propose your own. -4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details. -5. Submit proposal and the proof of enrollment before April 9 2019 18:00 UTC through the web site of the Program. - -## Project ideas - - -### Place database pods into the "Guaranteed" Quality-of-Service class - -* **Description**: Kubernetes runtime does not kill pods in this class on condition they stay within their resource limits, which is desirable for the DB pods serving production workloads. To be assigned to that class, pod's resources must equal its limits. The task is to add the `enableGuaranteedQoSClass` or the like option to the Postgres manifest and the operator configmap that forcibly re-write pod resources to match the limits. -* **Recommended skills**: golang, basic Kubernetes abstractions -* **Difficulty**: moderate -* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) - -### Implement the kubectl plugin for the Postgres CustomResourceDefinition - -* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command, -that can enable, for example, correct deletion or major version upgrade of Postgres clusters. -* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes -* **Difficulty**: moderate to medium, depending on the plugin design -* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) - -### Implement the openAPIV3Schema for the Postgres CRD - -* **Description**: at present the operator validates a database manifest on its own. -It will be helpful to reject erroneous manifests before they reach the operator using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation). It is up to the student to decide whether to write the schema manually or to adopt existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation). -* **Recommended skills**: golang, JSON schema -* **Difficulty**: medium -* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#388](https://github.com/zalando/postgres-operator/issues/388) - -### Design a solution for the local testing of the operator - -* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems: -First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing. 
-Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI. -A promising option is the Kubernetes own [kind](https://github.com/kubernetes-sigs/kind) -* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions -* **Difficulty**: medium to hard depending on the selected desing -* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#475](https://github.com/zalando/postgres-operator/issues/475) - -### Detach a Postgres cluster from the operator for maintenance - -* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster. - Currently the only way to achieve this behavior is to shutdown the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator control and thus is highly undesirable in production Kubernetes clusters. It would be much better to be able to detach only the desired Postgres cluster from the operator for the time being and re-attach it again after maintenance. -* **Recommended skills**: golang, architecture of a Kubernetes operator -* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases. -* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov) -* **Issue**: [#421](https://github.com/zalando/postgres-operator/issues/421) - -### Propose your own idea - -Feel free to come up with your own ideas. For inspiration, -see [our bug tracker](https://github.com/zalando/postgres-operator/issues), -the [official `CustomResouceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) -and [other operators](https://github.com/operator-framework/awesome-operators). diff --git a/docs/index.md b/docs/index.md index 87b08deb2..d0b4e4940 100644 --- a/docs/index.md +++ b/docs/index.md @@ -37,9 +37,10 @@ in some overarching orchestration, like rolling updates to improve the user experience. Monitoring or tuning Postgres is not in scope of the operator in the current -state. Other tools like [ZMON](https://opensource.zalando.com/zmon/), -[Prometheus](https://prometheus.io/) or more Postgres specific options can be -used to complement it. +state. However, with globally configurable sidecars we provide enough +flexibility to complement it with other tools like [ZMON](https://opensource.zalando.com/zmon/), +[Prometheus](https://prometheus.io/) or more Postgres specific options. + ## Overview of involved entities @@ -70,12 +71,26 @@ Please, report any issues discovered to https://github.com/zalando/postgres-oper ## Talks -1. 
"Building your own PostgreSQL-as-a-Service on Kubernetes" talk by Alexander Kukushkin, KubeCon NA 2018: [video](https://www.youtube.com/watch?v=G8MnpkbhClc) | [slides](https://static.sched.com/hosted_files/kccna18/1d/Building%20your%20own%20PostgreSQL-as-a-Service%20on%20Kubernetes.pdf) +- "PostgreSQL on K8S at Zalando: Two years in production" talk by Alexander Kukushkin, FOSSDEM 2020: [video](https://fosdem.org/2020/schedule/event/postgresql_postgresql_on_k8s_at_zalando_two_years_in_production/) | [slides](https://fosdem.org/2020/schedule/event/postgresql_postgresql_on_k8s_at_zalando_two_years_in_production/attachments/slides/3883/export/events/attachments/postgresql_postgresql_on_k8s_at_zalando_two_years_in_production/slides/3883/PostgreSQL_on_K8s_at_Zalando_Two_years_in_production.pdf) -2. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [video](https://www.youtube.com/watch?v=q26U2rQcqMw) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock) +- "Postgres as a Service at Zalando" talk by Jan Mußler, DevOpsDays PoznaÅ„ 2019: [video](https://www.youtube.com/watch?v=FiWS5m72XI8) -3. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni) +- "Building your own PostgreSQL-as-a-Service on Kubernetes" talk by Alexander Kukushkin, KubeCon NA 2018: [video](https://www.youtube.com/watch?v=G8MnpkbhClc) | [slides](https://static.sched.com/hosted_files/kccna18/1d/Building%20your%20own%20PostgreSQL-as-a-Service%20on%20Kubernetes.pdf) -4. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf) +- "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [video](https://www.youtube.com/watch?v=q26U2rQcqMw) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock) -5. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc) +- "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni) + +- "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf) + +- "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc) + +## Posts + +- "How to set up continuous backups and monitoring" by PÃ¥l Kristensen on [GitHub](https://github.com/zalando/postgres-operator/issues/858#issuecomment-608136253), Mar. 2020. 
+ +- "Postgres on Kubernetes with the Zalando operator" by Vito Botta on [has_many :code](https://vitobotta.com/2020/02/05/postgres-kubernetes-zalando-operator/), Feb. 2020. + +- "Running PostgreSQL in Google Kubernetes Engine" by Kenneth Rørvik on [Repill Linpro](https://www.redpill-linpro.com/techblog/2019/09/28/postgres-in-kubernetes.html), Sep. 2019. + +- "Zalando Postgres Operator: One Year Later" by Sergey Dudoladov on [Open Source Zalando](https://opensource.zalando.com/blog/2018/11/postgres-operator/), Nov. 2018 diff --git a/docs/quickstart.md b/docs/quickstart.md index d2c88b9a4..86d2fabbf 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -34,8 +34,8 @@ Postgres cluster. This can work in two ways: via a ConfigMap or a custom The Postgres Operator can be deployed in the following ways: * Manual deployment +* Kustomization * Helm chart -* Operator Lifecycle Manager (OLM) ### Manual deployment setup @@ -91,20 +91,6 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will be skipped with warning when using v3. Documentation for installing applications with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/). -### Operator Lifecycle Manager (OLM) - -The [Operator Lifecycle Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager) -has been designed to facilitate management of K8s operators. It has to be -installed in your K8s environment. When OLM is set up simply download and deploy -the Postgres Operator with the following command: - -```bash -kubectl create -f https://operatorhub.io/install/postgres-operator.yaml -``` - -This installs the operator in the `operators` namespace. More information can be -found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator). - ## Check if Postgres Operator is running Starting the operator may take a few seconds. Check if the operator pod is @@ -142,6 +128,9 @@ To deploy the UI simply apply all its manifests files or use the UI helm chart: # manual deployment kubectl apply -f ui/manifests/ +# or kustomization +kubectl apply -k github.com/zalando/postgres-operator/ui/manifests + # or helm chart helm install postgres-operator-ui ./charts/postgres-operator-ui ``` @@ -160,7 +149,7 @@ You can now access the web interface by port forwarding the UI pod (mind the label selector) and enter `localhost:8081` in your browser: ```bash -kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081 +kubectl port-forward svc/postgres-operator-ui 8081:80 ``` Available option are explained in detail in the [UI docs](operator-ui.md). diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index 7b049b6fa..1b2d71a66 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -65,6 +65,20 @@ These parameters are grouped directly under the `spec` key in the manifest. custom Docker image that overrides the **docker_image** operator parameter. It should be a [Spilo](https://github.com/zalando/spilo) image. Optional. +* **schedulerName** + specifies the scheduling profile for database pods. If no value is provided + K8s' `default-scheduler` will be used. Optional. + +* **spiloRunAsUser** + sets the user ID which should be used in the container to run the process. + This must be set to run the container without root. By default the container + runs with root. This option only works for Spilo versions >= 1.6-p3. 
+ +* **spiloRunAsGroup** + sets the group ID which should be used in the container to run the process. + This must be set to run the container without root. By default the container + runs with root. This option only works for Spilo versions >= 1.6-p3. + * **spiloFSGroup** the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and writable by the group ID specified. This will override the **spilo_fsgroup** @@ -111,12 +125,12 @@ These parameters are grouped directly under the `spec` key in the manifest. value overrides the `pod_toleration` setting from the operator. Optional. * **podPriorityClassName** - a name of the [priority - class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) - that should be assigned to the cluster pods. When not specified, the value - is taken from the `pod_priority_class_name` operator parameter, if not set - then the default priority class is taken. The priority class itself must be - defined in advance. Optional. + a name of the [priority + class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) + that should be assigned to the cluster pods. When not specified, the value + is taken from the `pod_priority_class_name` operator parameter, if not set + then the default priority class is taken. The priority class itself must be + defined in advance. Optional. * **podAnnotations** A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -140,6 +154,16 @@ These parameters are grouped directly under the `spec` key in the manifest. is `false`, then no volume will be mounted no matter how operator was configured (so you can override the operator configuration). Optional. +* **enableConnectionPooler** + Tells the operator to create a connection pooler with a database for the master + service. If this field is true, a connection pooler deployment will be created even if + `connectionPooler` section is empty. Optional, not set by default. + +* **enableReplicaConnectionPooler** + Tells the operator to create a connection pooler with a database for the replica + service. If this field is true, a connection pooler deployment for replica + will be created even if `connectionPooler` section is empty. Optional, not set by default. + * **enableLogicalBackup** Determines if the logical backup of this cluster should be taken and uploaded to S3. Default: false. Optional. @@ -149,6 +173,18 @@ These parameters are grouped directly under the `spec` key in the manifest. [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) into account. Optional. Default is: "30 00 \* \* \*" +* **additionalVolumes** + List of additional volumes to mount in each container of the statefulset pod. + Each item must contain a `name`, `mountPath`, and `volumeSource` which is a + [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource). + It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet. + Also an `emptyDir` volume can be shared between initContainer and statefulSet. + Additionaly, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example). + You can also specify in which container the additional Volumes will be mounted with the `targetContainers` array option. 
+ If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container. + If you set the `all` special item, it will be mounted in all containers (postgres + sidecars). + Else you can set the list of target containers in which the additional volumes will be mounted (eg : postgres, telegraf) + ## Postgres parameters Those parameters are grouped under the `postgresql` top-level key, which is @@ -184,9 +220,9 @@ explanation of `ttl` and `loop_wait` parameters. ``` hostssl all +pamrole all pam ``` - , where pamrole is the name of the role for the pam authentication; any - custom `pg_hba` should include the pam line to avoid breaking pam - authentication. Optional. + where pamrole is the name of the role for the pam authentication; any + custom `pg_hba` should include the pam line to avoid breaking pam + authentication. Optional. * **ttl** Patroni `ttl` parameter value, optional. The default is set by the Spilo @@ -212,6 +248,12 @@ explanation of `ttl` and `loop_wait` parameters. automatically created by Patroni for cluster members and permanent replication slots. Optional. +* **synchronous_mode** + Patroni `synchronous_mode` parameter value. The default is set to `false`. Optional. + +* **synchronous_mode_strict** + Patroni `synchronous_mode_strict` parameter value. Can be used in addition to `synchronous_mode`. The default is set to `false`. Optional. + ## Postgres container resources Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) @@ -296,13 +338,13 @@ archive is supported. the url to S3 bucket containing the WAL archive of the remote primary. Required when the `standby` section is present. -## EBS volume resizing +## Volume properties Those parameters are grouped under the `volume` top-level key and define the properties of the persistent storage that stores Postgres data. * **size** - the size of the target EBS volume. Usual Kubernetes size modifiers, i.e. `Gi` + the size of the target volume. Usual Kubernetes size modifiers, i.e. `Gi` or `Mi`, apply. Required. * **storageClass** @@ -314,6 +356,14 @@ properties of the persistent storage that stores Postgres data. * **subPath** Subpath to use when mounting volume into Spilo container. Optional. +* **iops** + When running the operator on AWS the latest generation of EBS volumes (`gp3`) + allows for configuring the number of IOPS. Maximum is 16000. Optional. + +* **throughput** + When running the operator on AWS the latest generation of EBS volumes (`gp3`) + allows for configuring the throughput in MB/s. Maximum is 1000. Optional. + ## Sidecar definitions Those parameters are defined under the `sidecars` key. They consist of a list @@ -359,3 +409,67 @@ CPU and memory limits for the sidecar container. * **memory** memory limits for the sidecar container. Optional, overrides the `default_memory_limits` operator configuration parameter. Optional. + +## Connection pooler + +Parameters are grouped under the `connectionPooler` top-level key and specify +configuration for connection pooler. If this section is not empty, a connection +pooler will be created for master service only even if `enableConnectionPooler` +is not present. But if this section is present then it defines the configuration +for both master and replica pooler services (if `enableReplicaConnectionPooler` + is enabled). + +* **numberOfInstances** + How many instances of connection pooler to create. 
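Taken together, a hedged sketch of how a cluster manifest fragment could request poolers for both services; the values shown are illustrative, and the remaining `connectionPooler` keys are described next:

```yaml
spec:
  # create pooler deployments for both the master and the replica service
  enableConnectionPooler: true
  enableReplicaConnectionPooler: true
  connectionPooler:
    numberOfInstances: 2      # illustrative value
    mode: "transaction"       # illustrative value
```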
+ +* **schema** + Database schema to create for credentials lookup function. + +* **user** + User to create for connection pooler to be able to connect to a database. + You can also choose a role from the `users` section or a system user role. + +* **dockerImage** + Which docker image to use for connection pooler deployment. + +* **maxDBConnections** + How many connections the pooler can max hold. This value is divided among the + pooler pods. + +* **mode** + In which mode to run connection pooler, transaction or session. + +* **resources** + Resource configuration for connection pooler deployment. + +## Custom TLS certificates + +Those parameters are grouped under the `tls` top-level key. + +* **secretName** + By setting the `secretName` value, the cluster will switch to load the given + Kubernetes Secret into the container as a volume and uses that as the + certificate instead. It is up to the user to create and manage the + Kubernetes Secret either by hand or using a tool like the CertManager + operator. + +* **certificateFile** + Filename of the certificate. Defaults to "tls.crt". + +* **privateKeyFile** + Filename of the private key. Defaults to "tls.key". + +* **caFile** + Optional filename to the CA certificate (e.g. "ca.crt"). Useful when the + client connects with `sslmode=verify-ca` or `sslmode=verify-full`. + Default is empty. + +* **caSecretName** + By setting the `caSecretName` value, the ca certificate file defined by the + `caFile` will be fetched from this secret instead of `secretName` above. + This secret has to hold a file with that name in its root. + + Optionally one can provide full path for any of them. By default it is + relative to the "/tls/", which is mount path of the tls secret. + If `caSecretName` is defined, the ca.crt path is relative to "/tlsca/", + otherwise to the same "/tls/". diff --git a/docs/reference/command_line_and_environment.md b/docs/reference/command_line_and_environment.md index ec5da5ceb..35f47cabf 100644 --- a/docs/reference/command_line_and_environment.md +++ b/docs/reference/command_line_and_environment.md @@ -45,7 +45,7 @@ The following environment variables are accepted by the operator: all namespaces. Empty value defaults to the operator namespace. Overrides the `watched_namespace` operator parameter. -* **SCALYR_API_KEY** +* **SCALYR_API_KEY** (*deprecated*) the value of the Scalyr API key to supply to the pods. Overrides the `scalyr_api_key` operator parameter. @@ -56,3 +56,7 @@ The following environment variables are accepted by the operator: * **CRD_READY_WAIT_INTERVAL** defines the interval between consecutive attempts waiting for the `postgresql` CRD to be created. The default is 5s. + +* **ENABLE_JSON_LOGGING** + Set to `true` for JSON formatted logging output. + The default is false. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 21b172ff4..7797f1b73 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -75,11 +75,27 @@ Those are top-level keys, containing both leaf keys and groups. [OpenAPI v3 schema validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation) The default is `true`. +* **enable_lazy_spilo_upgrade** + Instruct operator to update only the statefulsets with new images (Spilo and InitContainers) without immediately doing the rolling update. The assumption is pods will be re-started later with new images, for example due to the node rotation. 
+ The default is `false`. + +* **enable_pgversion_env_var** + With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`. + +* **enable_spilo_wal_path_compat** + enables backwards compatible path between Spilo 12 and Spilo 13 images. The default is `false`. + * **etcd_host** Etcd connection string for Patroni defined as `host:port`. Not required when Patroni native Kubernetes support is used. The default is empty (use Kubernetes-native DCS). +* **kubernetes_use_configmaps** + Select if setup uses endpoints (default), or configmaps to manage leader when + DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to + use endpoints option, and configmaps is required. By default, + `kubernetes_use_configmaps: false`, meaning endpoints will be used. + * **docker_image** Spilo Docker image for Postgres instances. For production, don't rely on the default image, as it might be not the most up-to-date one. Instead, build @@ -87,9 +103,18 @@ Those are top-level keys, containing both leaf keys and groups. repository](https://github.com/zalando/spilo). * **sidecar_docker_images** - a map of sidecar names to Docker images to run with Spilo. In case of the name - conflict with the definition in the cluster manifest the cluster-specific one - is preferred. + *deprecated*: use **sidecars** instead. A map of sidecar names to Docker + images to run with Spilo. In case of the name conflict with the definition in + the cluster manifest the cluster-specific one is preferred. + +* **sidecars** + a list of sidecars to run with Spilo, for any cluster (i.e. globally defined + sidecars). Each item in the list is of type + [Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). + Globally defined sidecars can be overwritten by specifying a sidecar in the + Postgres manifest with the same name. + Note: This field is not part of the schema validation. If the container + specification is invalid, then the operator fails to create the statefulset. * **enable_shm_volume** Instruct operator to start any new database pod without limitations on shm @@ -101,7 +126,7 @@ Those are top-level keys, containing both leaf keys and groups. * **workers** number of working routines the operator spawns to process requests to - create/update/delete/sync clusters concurrently. The default is `4`. + create/update/delete/sync clusters concurrently. The default is `8`. * **max_instances** operator will cap the number of instances in any managed Postgres cluster up @@ -127,8 +152,9 @@ Those are top-level keys, containing both leaf keys and groups. at the cost of overprovisioning memory and potential scheduling problems for containers with high memory limits due to the lack of memory on Kubernetes cluster nodes. This affects all containers created by the operator (Postgres, - Scalyr sidecar, and other sidecars); to set resources for the operator's own - container, change the [operator deployment manually](../../manifests/postgres-operator.yaml#L20). 
+ Scalyr sidecar, and other sidecars except **sidecars** defined in the operator + configuration); to set resources for the operator's own container, change the + [operator deployment manually](../../manifests/postgres-operator.yaml#L20). The default is `false`. ## Postgres users @@ -187,6 +213,22 @@ configuration they are grouped under the `kubernetes` key. of a database created by the operator. If the annotation key is also provided by the database definition, the database definition value is used. +* **delete_annotation_date_key** + key name for annotation that compares manifest value with current date in the + YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. + The default is empty which also disables this delete protection check. + +* **delete_annotation_name_key** + key name for annotation that compares manifest value with Postgres cluster name. + Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is + empty which also disables this delete protection check. + +* **downscaler_annotations** + An array of annotations that should be passed from Postgres CRD on to the + statefulset and, if exists, to the connection pooler deployment as well. + Regular expressions like `downscaler/*` etc. are also accepted. Can be used + with [kube-downscaler](https://github.com/hjacobs/kube-downscaler). + * **watched_namespace** The operator watches for Postgres objects in the given namespace. If not specified, the value is taken from the operator namespace. A special `*` @@ -207,12 +249,13 @@ configuration they are grouped under the `kubernetes` key. Default is true. * **enable_init_containers** - global option to allow for creating init containers to run actions before - Spilo is started. Default is true. + global option to allow for creating init containers in the cluster manifest to + run actions before Spilo is started. Default is true. * **enable_sidecars** - global option to allow for creating sidecar containers to run alongside Spilo - on the same pod. Default is true. + global option to allow for creating sidecar containers in the cluster manifest + to run alongside Spilo on the same pod. Globally defined sidecars are always + enabled. Default is true. * **secret_name_template** a template for the name of the database user secrets generated by the @@ -228,11 +271,24 @@ configuration they are grouped under the `kubernetes` key. to the Postgres clusters after creation. * **oauth_token_secret_name** - a name of the secret containing the `OAuth2` token to pass to the teams API. - The default is `postgresql-operator`. + namespaced name of the secret containing the `OAuth2` token to pass to the + teams API. The default is `postgresql-operator`. * **infrastructure_roles_secret_name** - name of the secret containing infrastructure roles names and passwords. + *deprecated*: namespaced name of the secret containing infrastructure roles + with user names, passwords and role membership. + +* **infrastructure_roles_secrets** + array of infrastructure role definitions which reference existing secrets + and specify the key names from which user name, password and role membership + are extracted. For the ConfigMap this has to be a string which allows + referencing only one infrastructure roles secret. The default is empty. + +* **inherited_annotations** + list of annotation keys that can be inherited from the cluster manifest, and + added to each child objects (`Deployment`, `StatefulSet`, `Pod`, `PDB` and + `Services`) created by the operator incl. 
the ones from the connection + pooler deployment. The default is empty. * **pod_role_label** name of the label assigned to the Postgres pods (and services/endpoints) by @@ -243,15 +299,16 @@ configuration they are grouped under the `kubernetes` key. objects. The default is `application:spilo`. * **inherited_labels** - list of labels that can be inherited from the cluster manifest, and added to - each child objects (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created - by the operator. Typical use case is to dynamically pass labels that are - specific to a given Postgres cluster, in order to implement `NetworkPolicy`. - The default is empty. + list of label keys that can be inherited from the cluster manifest, and + added to each child objects (`Deployment`, `StatefulSet`, `Pod`, `PVCs`, + `PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator. + Typical use case is to dynamically pass labels that are specific to a + given Postgres cluster, in order to implement `NetworkPolicy`. The default + is empty. * **cluster_name_label** - name of the label assigned to Kubernetes objects created by the operator that - indicates which cluster a given object belongs to. The default is + name of the label assigned to Kubernetes objects created by the operator + that indicates which cluster a given object belongs to. The default is `cluster-name`. * **node_readiness_label** @@ -269,17 +326,27 @@ configuration they are grouped under the `kubernetes` key. for details on taints and tolerations. The default is empty. * **pod_environment_configmap** - a name of the ConfigMap with environment variables to populate on every pod. - Right now this ConfigMap is searched in the namespace of the Postgres cluster. - All variables from that ConfigMap are injected to the pod's environment, on - conflicts they are overridden by the environment variables generated by the - operator. The default is empty. + namespaced name of the ConfigMap with environment variables to populate on + every pod. Right now this ConfigMap is searched in the namespace of the + Postgres cluster. All variables from that ConfigMap are injected to the pod's + environment, on conflicts they are overridden by the environment variables + generated by the operator. The default is empty. * **pod_priority_class_name** a name of the [priority class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) that should be assigned to the Postgres pods. The priority class itself must be defined in advance. Default is empty (use the default priority class). +* **spilo_runasuser** + sets the user ID which should be used in the container to run the process. + This must be set to run the container without root. By default the container + runs with root. This option only works for Spilo versions >= 1.6-p3. + +* **spilo_runasgroup** + sets the group ID which should be used in the container to run the process. + This must be set to run the container without root. By default the container + runs with root. This option only works for Spilo versions >= 1.6-p3. + * **spilo_fsgroup** the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and writable by the group ID specified. This is required to run Spilo as a @@ -291,12 +358,12 @@ configuration they are grouped under the `kubernetes` key. used for AWS volume resizing and not required if you don't need that capability. The default is `false`. 
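To illustrate the non-root settings above, a minimal sketch of the corresponding keys under the `kubernetes` group of the CRD-based configuration; the numeric IDs are only examples matching the stock Spilo image and are not mandated by the operator:

```yaml
configuration:
  kubernetes:
    # run the Spilo container as a non-root user/group (example IDs)
    spilo_runasuser: 101
    spilo_runasgroup: 103
    # make the persistent volumes writable by that group
    spilo_fsgroup: 103
```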
- * **master_pod_move_timeout** - The period of time to wait for the success of migration of master pods from - an unschedulable node. The migration includes Patroni switchovers to - respective replicas on healthy nodes. The situation where master pods still - exist on the old node after this timeout expires has to be fixed manually. - The default is 20 minutes. +* **master_pod_move_timeout** + The period of time to wait for the success of migration of master pods from + an unschedulable node. The migration includes Patroni switchovers to + respective replicas on healthy nodes. The situation where master pods still + exist on the old node after this timeout expires has to be fixed manually. + The default is 20 minutes. * **enable_pod_antiaffinity** toggles [pod anti affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) @@ -312,6 +379,15 @@ configuration they are grouped under the `kubernetes` key. of stateful sets of PG clusters. The default is `ordered_ready`, the second possible value is `parallel`. +* **storage_resize_mode** + defines how operator handles the difference between the requested volume size and + the actual size. Available options are: + 1. `ebs` : operator resizes EBS volumes directly and executes `resizefs` within a pod + 2. `pvc` : operator only changes PVC definition + 3. `off` : disables resize of the volumes. + 4. `mixed` :operator uses AWS API to adjust size, throughput, and IOPS, and calls pvc change for file system resize + Default is "pvc". + ## Kubernetes resource requests This group allows you to configure resource requests for the Postgres pods. @@ -381,6 +457,12 @@ CRD-based configuration. Those options affect the behavior of load balancers created by the operator. In the CRD-based configuration they are grouped under the `load_balancer` key. +* **custom_service_annotations** + This key/value map provides a list of annotations that get attached to each + service of a cluster created by the operator. If the annotation key is also + provided by the cluster definition, the manifest value is used. + Optional. + * **db_hosted_zone** DNS zone for the cluster DNS name when the load balancer is configured for the cluster. Only used when combined with @@ -397,11 +479,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. cluster. Can be overridden by individual cluster settings. The default is `false`. -* **custom_service_annotations** - This key/value map provides a list of annotations that get attached to each - service of a cluster created by the operator. If the annotation key is also - provided by the cluster definition, the manifest value is used. - Optional. +* **external_traffic_policy** defines external traffic policy for load + balancers. Allowed values are `Cluster` (default) and `Local`. * **master_dns_name_format** defines the DNS name string template for the master load balancer cluster. The default is @@ -430,6 +509,20 @@ yet officially supported. present and accessible by Postgres pods. At the moment, supported services by Spilo are S3 and GCS. The default is empty. +* **wal_gs_bucket** + GCS bucket to use for shipping WAL segments with WAL-E. A bucket has to be + present and accessible by Postgres pods. Note, only the name of the bucket is + required. At the moment, supported services by Spilo are S3 and GCS. + The default is empty. + +* **gcp_credentials** + Used to set the GOOGLE_APPLICATION_CREDENTIALS environment variable for the pods. 
+ This is used in with conjunction with the `additional_secret_mount` and + `additional_secret_mount_path` to properly set the credentials for the spilo + containers. This will allow users to use specific + [service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform). + The default is empty + * **log_s3_bucket** S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS. The bucket has to be present and accessible by Postgres pods. The default is @@ -445,10 +538,22 @@ yet officially supported. AWS region used to store EBS volumes. The default is `eu-central-1`. * **additional_secret_mount** - Additional Secret (aws or gcp credentials) to mount in the pod. The default is empty. + Additional Secret (aws or gcp credentials) to mount in the pod. + The default is empty. * **additional_secret_mount_path** - Path to mount the above Secret in the filesystem of the container(s). The default is empty. + Path to mount the above Secret in the filesystem of the container(s). + The default is empty. + +* **enable_ebs_gp3_migration** + enable automatic migration on AWS from gp2 to gp3 volumes, that are smaller + than the configured max size (see below). This ignores that EBS gp3 is by + default only 125 MB/sec vs 250 MB/sec for gp2 >= 333GB. + The default is `false`. + +* **enable_ebs_gp3_migration_max_size** + defines the maximum volume size in GB until which auto migration happens. + Default is 1000 (1TB) which matches 3000 IOPS. ## Logical backup @@ -456,38 +561,48 @@ These parameters configure a K8s cron job managed by the operator to produce Postgres logical backups. In the CRD-based configuration those parameters are grouped under the `logical_backup` key. -* **logical_backup_schedule** - Backup schedule in the cron format. Please take the - [reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) - into account. Default: "30 00 \* \* \*" - * **logical_backup_docker_image** An image for pods of the logical backup job. The [example image](../../docker/logical-backup/Dockerfile) runs `pg_dumpall` on a replica if possible and uploads compressed results to an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`. The default image is the same image built with the Zalando-internal CI - pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup" + pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + +* **logical_backup_google_application_credentials** + Specifies the path of the google cloud service account json file. Default is empty. + +* **logical_backup_job_prefix** + The prefix to be prepended to the name of a k8s CronJob running the backups. Beware the prefix counts towards the name length restrictions imposed by k8s. Empty string is a legitimate value. Operator does not do the actual renaming: It simply creates the job with the new prefix. You will have to delete the old cron job manually. Default: "logical-backup-". + +* **logical_backup_provider** + Specifies the storage provider to which the backup should be uploaded (`s3` or `gcs`). + Default: "s3" + +* **logical_backup_s3_access_key_id** + When set, value will be in AWS_ACCESS_KEY_ID env variable. The Default is empty. * **logical_backup_s3_bucket** S3 bucket to store backup results. The bucket has to be present and accessible by Postgres pods. Default: empty. 
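For orientation, a hedged sketch of how a few of these keys could look in the ConfigMap-based configuration; the bucket name is a placeholder and the ConfigMap name must match whatever your operator deployment references:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator              # must match the config target of the operator deployment
data:
  # upload daily logical backups of clusters that enable them to S3
  logical_backup_provider: "s3"
  logical_backup_s3_bucket: "my-logical-backup-bucket"   # placeholder bucket name
  logical_backup_schedule: "30 00 * * *"
```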
-* **logical_backup_s3_region** - Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty. - * **logical_backup_s3_endpoint** When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty. -* **logical_backup_s3_sse** - Specify server side encription that S3 storage is using. If empty string - is specified, no argument will be passed to `aws s3` command. Default: "AES256". - -* **logical_backup_s3_access_key_id** - When set, value will be in AWS_ACCESS_KEY_ID env variable. The Default is empty. +* **logical_backup_s3_region** + Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty. * **logical_backup_s3_secret_access_key** When set, value will be in AWS_SECRET_ACCESS_KEY env variable. The Default is empty. +* **logical_backup_s3_sse** + Specify server side encryption that S3 storage is using. If empty string + is specified, no argument will be passed to `aws s3` command. Default: "AES256". + +* **logical_backup_schedule** + Backup schedule in the cron format. Please take the + [reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) + into account. Default: "30 00 \* \* \*" + ## Debugging the operator Options to aid debugging of the operator itself. Grouped under the `debug` key. @@ -528,8 +643,8 @@ key. The default is `"log_statement:all"` * **enable_team_superuser** - whether to grant superuser to team members created from the Teams API. - The default is `false`. + whether to grant superuser to members of the cluster's owning team created + from the Teams API. The default is `false`. * **team_admin_role** role name to grant to team members created from the Teams API. The default is @@ -562,6 +677,16 @@ key. cluster to administer Postgres and maintain infrastructure built around it. The default is empty. +* **enable_postgres_team_crd** + toggle to make the operator watch for created or updated `PostgresTeam` CRDs + and create roles for specified additional teams and members. + The default is `false`. + +* **enable_postgres_team_crd_superusers** + in a `PostgresTeam` CRD additional superuser teams can assigned to teams that + own clusters. With this flag set to `false`, it will be ignored. + The default is `false`. + ## Logging and REST API Parameters affecting logging and REST API listener. In the CRD-based @@ -576,11 +701,12 @@ configuration they are grouped under the `logging_rest_api` key. * **cluster_history_entries** number of entries in the cluster history ring buffer. The default is `1000`. -## Scalyr options +## Scalyr options (*deprecated*) Those parameters define the resource requests/limits and properties of the scalyr sidecar. In the CRD-based configuration they are grouped under the -`scalyr` key. +`scalyr` key. Note, that this section is deprecated. Instead, define Scalyr as +a global sidecar under the `sidecars` key in the configuration. * **scalyr_api_key** API key for the Scalyr sidecar. The default is empty. @@ -602,3 +728,43 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the * **scalyr_memory_limit** Memory limit value for the Scalyr sidecar. The default is `500Mi`. + +## Connection pooler configuration + +Parameters are grouped under the `connection_pooler` top-level key and specify +default configuration for connection pooler, if a postgres manifest requests it +but do not specify some of the parameters. 
All of them are optional with the +operator being able to provide some reasonable defaults. + +* **connection_pooler_number_of_instances** + How many instances of connection pooler to create. Default is 2 which is also + the required minimum. + +* **connection_pooler_schema** + Database schema to create for credentials lookup function to be used by the + connection pooler. Is is created in every database of the Postgres cluster. + You can also choose an existing schema. Default schema is `pooler`. + +* **connection_pooler_user** + User to create for connection pooler to be able to connect to a database. + You can also choose an existing role, but make sure it has the `LOGIN` + privilege. Default role is `pooler`. + +* **connection_pooler_image** + Docker image to use for connection pooler deployment. + Default: "registry.opensource.zalan.do/acid/pgbouncer" + +* **connection_pooler_max_db_connections** + How many connections the pooler can max hold. This value is divided among the + pooler pods. Default is 60 which will make up 30 connections per pod for the + default setup with two instances. + +* **connection_pooler_mode** + Default pooler mode, `session` or `transaction`. Default is `transaction`. + +* **connection_pooler_default_cpu_request** + **connection_pooler_default_memory_reques** + **connection_pooler_default_cpu_limit** + **connection_pooler_default_memory_limit** + Default resource configuration for connection pooler deployment. The internal + default for memory request and limit is `100Mi`, for CPU it is `500m` and `1`. diff --git a/docs/user.md b/docs/user.md index 295c149bd..ec5941d9e 100644 --- a/docs/user.md +++ b/docs/user.md @@ -30,7 +30,7 @@ spec: databases: foo: zalando postgresql: - version: "11" + version: "12" ``` Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator) @@ -49,37 +49,48 @@ Note, that the name of the cluster must start with the `teamId` and `-`. At Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster names and colliding entities. The team ID would also be used to query an API to get all members of a team and create [database roles](#teams-api-roles) for -them. +them. Besides, the maximum cluster name length is 53 characters. ## Watch pods being created +Check if the database pods are coming up. Use the label `application=spilo` to +filter and list the label `spilo-role` to see when the master is promoted and +replicas get their labels. + ```bash -kubectl get pods -w --show-labels +kubectl get pods -l application=spilo -L spilo-role -w +``` + +The operator also emits K8s events to the Postgresql CRD which can be inspected +in the operator logs or with: + +```bash +kubectl describe postgresql acid-minimal-cluster ``` ## Connect to PostgreSQL With a `port-forward` on one of the database pods (e.g. the master) you can -connect to the PostgreSQL database. Use labels to filter for the master pod of -our test cluster. +connect to the PostgreSQL database from your machine. Use labels to filter for +the master pod of our test cluster. 
```bash # get name of master pod of acid-minimal-cluster -export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master) +export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master -n default) # set up port forward -kubectl port-forward $PGMASTER 6432:5432 +kubectl port-forward $PGMASTER 6432:5432 -n default ``` -Open another CLI and connect to the database. Use the generated secret of the -`postgres` robot user to connect to our `acid-minimal-cluster` master running -in Minikube. As non-encrypted connections are rejected by default set the SSL -mode to require: +Open another CLI and connect to the database using e.g. the psql client. +When connecting with the `postgres` user read its password from the K8s secret +which was generated when creating the `acid-minimal-cluster`. As non-encrypted +connections are rejected by default set the SSL mode to `require`: ```bash export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d) export PGSSLMODE=require -psql -U postgres -p 6432 +psql -U postgres -h localhost -p 6432 ``` ## Defining database roles in the operator @@ -94,7 +105,10 @@ created on every cluster managed by the operator. * `teams API roles`: automatically create users for every member of the team owning the database cluster. -In the next sections, we will cover those use cases in more details. +In the next sections, we will cover those use cases in more details. Note, that +the Postgres Operator can also create databases with pre-defined owner, reader +and writer roles which saves you the manual setup. Read more in the next +chapter. ### Manifest roles @@ -136,23 +150,62 @@ user. There are two ways to define them: #### Infrastructure roles secret -The infrastructure roles secret is specified by the `infrastructure_roles_secret_name` -parameter. The role definition looks like this (values are base64 encoded): +Infrastructure roles can be specified by the `infrastructure_roles_secrets` +parameter where you can reference multiple existing secrets. Prior to `v1.6.0` +the operator could only reference one secret with the +`infrastructure_roles_secret_name` option. However, this secret could contain +multiple roles using the same set of keys plus incrementing index. ```yaml -user1: ZGJ1c2Vy -password1: c2VjcmV0 -inrole1: b3BlcmF0b3I= +apiVersion: v1 +kind: Secret +metadata: + name: postgresql-infrastructure-roles +data: + user1: ZGJ1c2Vy + password1: c2VjcmV0 + inrole1: b3BlcmF0b3I= + user2: ... ``` The block above describes the infrastructure role 'dbuser' with password -'secret' that is a member of the 'operator' role. For the following definitions -one must increase the index, i.e. the next role will be defined as 'user2' and -so on. The resulting role will automatically be a login role. +'secret' that is a member of the 'operator' role. The resulting role will +automatically be a login role. -Note that with definitions that solely use the infrastructure roles secret -there is no way to specify role options (like superuser or nologin) or role -memberships. This is where the ConfigMap comes into play. +With the new option users can configure the names of secret keys that contain +the user name, password etc. The secret itself is referenced by the +`secretname` key. If the secret uses a template for multiple roles as described +above list them separately. 
+ +```yaml +apiVersion: v1 +kind: OperatorConfiguration +metadata: + name: postgresql-operator-configuration +configuration: + kubernetes: + infrastructure_roles_secrets: + - secretname: "postgresql-infrastructure-roles" + userkey: "user1" + passwordkey: "password1" + rolekey: "inrole1" + - secretname: "postgresql-infrastructure-roles" + userkey: "user2" + ... +``` + +Note, only the CRD-based configuration allows for referencing multiple secrets. +As of now, the ConfigMap is restricted to either one or the existing template +option with `infrastructure_roles_secret_name`. Please, refer to the example +manifests to understand how `infrastructure_roles_secrets` has to be configured +for the [configmap](../manifests/configmap.yaml) or [CRD configuration](../manifests/postgresql-operator-default-configuration.yaml). + +If both `infrastructure_roles_secret_name` and `infrastructure_roles_secrets` +are defined the operator will create roles for both of them. So make sure, +they do not collide. Note also, that with definitions that solely use the +infrastructure roles secret there is no way to specify role options (like +superuser or nologin) or role memberships. This is where the additional +ConfigMap comes into play. #### Secret plus ConfigMap @@ -216,6 +269,304 @@ to choose superusers, group roles, [PAM configuration](https://github.com/CyberD etc. An OAuth2 token can be passed to the Teams API via a secret. The name for this secret is configurable with the `oauth_token_secret_name` parameter. +### Additional teams and members per cluster + +Postgres clusters are associated with one team by providing the `teamID` in +the manifest. Additional superuser teams can be configured as mentioned in +the previous paragraph. However, this is a global setting. To assign +additional teams, superuser teams and single users to clusters of a given +team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml). + +Note, by default the `PostgresTeam` support is disabled in the configuration. +Switch `enable_postgres_team_crd` flag to `true` and the operator will start to +watch for this CRD. Make sure, the cluster role is up to date and contains a +section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30). + +#### Additional teams + +To assign additional teams and single users to clusters of a given team, +define a mapping with the `PostgresTeam` Kubernetes resource. The Postgres +Operator will read such team mappings each time it syncs all Postgres clusters. + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalTeams: + a-team: + - "b-team" +``` + +With the example above the operator will create login roles for all members +of `b-team` in every cluster owned by `a-team`. It's possible to do vice versa +for clusters of `b-team` in one manifest: + +```yaml +spec: + additionalTeams: + a-team: + - "b-team" + b-team: + - "a-team" +``` + +You see, the `PostgresTeam` CRD is a global team mapping and independent from +the Postgres manifests. It is possible to define multiple mappings, even with +redundant content - the Postgres operator will create one internal cache from +it. 
Additional teams are resolved transitively, meaning you will also add +users for their `additionalTeams`, e.g.: + +```yaml +spec: + additionalTeams: + a-team: + - "b-team" + - "c-team" + b-team: + - "a-team" +``` + +This creates roles for members of the `c-team` team not only in all clusters +owned by `a-team`, but as well in cluster owned by `b-team`, as `a-team` is +an `additionalTeam` to `b-team` + +Not, you can also define `additionalSuperuserTeams` in the `PostgresTeam` +manifest. By default, this option is disabled and must be configured with +`enable_postgres_team_crd_superusers` to make it work. + +#### Virtual teams + +There can be "virtual teams" that do not exist in the Teams API. It can make +it easier to map a group of teams to many other teams: + +```yaml +spec: + additionalTeams: + a-team: + - "virtual-team" + b-team: + - "virtual-team" + virtual-team: + - "c-team" + - "d-team" +``` + +This example would create roles for members of `c-team` and `d-team` plus +additional `virtual-team` members in clusters owned by `a-team` or `b-team`. + +#### Teams changing their names + +With `PostgresTeams` it is also easy to cover team name changes. Just add +the mapping between old and new team name and the rest can stay the same. +E.g. if team `a-team`'s name would change to `f-team` in the teams API it +could be reflected in a `PostgresTeam` mapping with just two lines: + +```yaml +spec: + additionalTeams: + a-team: + - "f-team" +``` + +This is helpful, because Postgres cluster names are immutable and can not +be changed. Only via cloning it could get a different name starting with the +new `teamID`. + +#### Additional members + +Single members might be excluded from teams although they continue to work +with the same people. However, the teams API would not reflect this anymore. +To still add a database role for former team members list their role under +the `additionalMembers` section of the `PostgresTeam` resource: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalMembers: + a-team: + - "tia" +``` + +This will create the login role `tia` in every cluster owned by `a-team`. +The user can connect to databases like the other team members. + +The `additionalMembers` map can also be used to define users of virtual +teams, e.g. for `virtual-team` we used above: + +```yaml +spec: + additionalMembers: + virtual-team: + - "flynch" + - "rdecker" + - "briggs" +``` + +## Prepared databases with roles and default privileges + +The `users` section in the manifests only allows for creating database roles +with global privileges. Fine-grained data access control or role membership can +not be defined and must be set up by the user in the database. But, the Postgres +Operator offers a separate section to specify `preparedDatabases` that will be +created with pre-defined owner, reader and writer roles for each individual +database and, optionally, for each database schema, too. `preparedDatabases` +also enable users to specify PostgreSQL extensions that shall be created in a +given database schema. + +### Default database and schema + +A prepared database is already created by adding an empty `preparedDatabases` +section to the manifest. The database will then be called like the Postgres +cluster manifest (`-` are replaced with `_`) and will also contain a schema +called `data`. 
+
+```yaml
+spec:
+  preparedDatabases: {}
+```
+
+### Default NOLOGIN roles
+
+Given an example with a specified database and schema:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      schemas:
+        bar: {}
+```
+
+the Postgres Operator will create the following NOLOGIN roles:
+
+| Role name      | Member of      | Admin         |
+| -------------- | -------------- | ------------- |
+| foo_owner      |                | admin         |
+| foo_reader     |                | foo_owner     |
+| foo_writer     | foo_reader     | foo_owner     |
+| foo_bar_owner  |                | foo_owner     |
+| foo_bar_reader |                | foo_bar_owner |
+| foo_bar_writer | foo_bar_reader | foo_bar_owner |
+
+The `<dbname>_owner` role is the database owner and should be used when creating
+new database objects. All members of the `admin` role, e.g. teams API roles, can
+become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/12/sql-alterdefaultprivileges.html)
+are configured for the owner role so that the `<dbname>_reader` role
+automatically gets read-access (SELECT) to new tables and sequences and the
+`<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
+USAGE and UPDATE on sequences). Both get USAGE on types and EXECUTE on
+functions.
+
+The same principle applies for database schemas which are owned by the
+`<dbname>_<schema>_owner` role. `<dbname>_<schema>_reader` is read-only,
+`<dbname>_<schema>_writer` has write access and inherits read access from the
+reader role. Note that the `<dbname>_*` roles have access incl. default
+privileges on all schemas, too. If you don't need the dedicated schema roles -
+i.e. you only use one schema - you can disable their creation like this:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      schemas:
+        bar:
+          defaultRoles: false
+```
+
+Then, the schemas are owned by the database owner, too.
+
+### Default LOGIN roles
+
+The roles described in the previous paragraph can be granted to LOGIN roles from
+the `users` section in the manifest. Optionally, the Postgres Operator can also
+create default LOGIN roles for the database and each schema individually. These
+roles will get the `_user` suffix and they inherit all rights from their NOLOGIN
+counterparts.
+
+| Role name           | Member of      | Admin         |
+| ------------------- | -------------- | ------------- |
+| foo_owner_user      | foo_owner      | admin         |
+| foo_reader_user     | foo_reader     | foo_owner     |
+| foo_writer_user     | foo_writer     | foo_owner     |
+| foo_bar_owner_user  | foo_bar_owner  | foo_owner     |
+| foo_bar_reader_user | foo_bar_reader | foo_bar_owner |
+| foo_bar_writer_user | foo_bar_writer | foo_bar_owner |
+
+These default users are enabled in the manifest with the `defaultUsers` flag:
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      defaultUsers: true
+      schemas:
+        bar:
+          defaultUsers: true
+```
+
+### Database extensions
+
+Prepared databases also allow for creating Postgres extensions. They will be
+created by the database owner in the specified schema.
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      extensions:
+        pg_partman: public
+        postgis: data
+```
+
+Some extensions require SUPERUSER rights on creation unless they are
+whitelisted by the [pgextwlist](https://github.com/dimitri/pgextwlist)
+extension, which is shipped with the Spilo image. To see which extensions are
+on the list, check the `extwlist.extensions` parameter in the postgresql.conf
+file.
+
+```sql
+SHOW extwlist.extensions;
+```
+
+Make sure that `pgextwlist` is also listed under `shared_preload_libraries` in
+the PostgreSQL configuration. Then the database owner should be able to create
+the extension specified in the manifest.
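+
+To verify the result, you can, for example, list the installed extensions from
+inside the Postgres pod with `psql`. The pod and database names below are just
+the ones from the examples above and the sample manifests - adjust them to your
+cluster:
+
+```bash
+# list extensions created in the prepared database "foo"
+kubectl exec -it acid-minimal-cluster-0 -- su postgres -c 'psql -d foo -c "\dx"'
+```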
+
+### From `databases` to `preparedDatabases`
+
+If you wish to create the role setup described above for databases listed under
+the `databases` key, you have to make sure that the owner role follows the
+`<dbname>_owner` naming convention of `preparedDatabases`. As roles are synced
+first, this can be done with one edit:
+
+```yaml
+# before
+spec:
+  databases:
+    foo: db_owner
+
+# after
+spec:
+  databases:
+    foo: foo_owner
+  preparedDatabases:
+    foo:
+      schemas:
+        my_existing_schema: {}
+```
+
+Adding existing database schemas to the manifest to create roles for them as
+well is up to the user and not done by the operator. Remember that if you don't
+specify any schema, a new database schema called `data` will be created. Once
+everything is synced (roles, schemas, extensions), you are free to remove the
+database from the `databases` section. Note that the operator does not delete
+database objects or revoke privileges when they are removed from the manifest.
+
 ## Resource definition
 
 The compute resources to be used for the Postgres containers in the pods can be
@@ -238,7 +589,7 @@ manifest the operator will raise the limits to the configured minimum values.
 If no resources are defined in the manifest they will be obtained from the
 configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests).
 
-## Use taints and tolerations for dedicated PostgreSQL nodes
+## Use taints, tolerations and node affinity for dedicated PostgreSQL nodes
 
 To ensure Postgres pods are running on nodes without any other application
 pods, you can use [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
@@ -252,6 +603,28 @@ spec:
     effect: NoSchedule
 ```
 
+If you need the pods to be scheduled on specific nodes, you may use [node affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)
+to specify a set of label(s), of which a prospective host node must have at least one. This can be used to
+run clusters on nodes with certain hardware capabilities (e.g. SSD drives) or in certain environments or network segments,
+e.g. for PCI compliance.
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-minimal-cluster
+spec:
+  teamId: "ACID"
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: environment
+              operator: In
+              values:
+                - pci
+```
+
 ## How to clone an existing PostgreSQL cluster
 
 You can spin up a new cluster as a clone of the existing one, using a `clone`
@@ -263,6 +636,10 @@ section in the spec. There are two options here:
 
 Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade)
 of PostgreSQL.
 
+## In-place major version upgrade
+
+Starting with Spilo 13, the operator supports in-place major version upgrades to a higher major version (e.g. from PG 10 to PG 12). To trigger the upgrade, simply increase the version in the manifest. It is your responsibility to test your applications against the new version before the upgrade; downgrading is not supported. The easiest way to do so is to try the upgrade on a cloned cluster first. For details of how Spilo performs the upgrade [see here](https://github.com/zalando/spilo/pull/488); the operator implementation is described [in the admin docs](administrator.md#minor-and-major-version-upgrade).
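+
+As a minimal sketch, assuming a cluster that currently runs PG 12, bumping the
+version in the `postgresql` section of the cluster manifest is all that is
+needed to trigger the upgrade:
+
+```yaml
+spec:
+  postgresql:
+    version: "13"  # was "12" before the edit
+```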
+
 
 ### Clone from S3
 
 Cloning from S3 has the advantage that there is no impact on your production
@@ -442,6 +819,8 @@ The PostgreSQL volume is shared with sidecars and is mounted at
 specified but globally disabled in the configuration. The `enable_sidecars`
 option must be set to `true`.
 
+If you want to add a sidecar to every cluster managed by the operator, you can specify it in the [operator configuration](administrator.md#sidecars-for-postgres-clusters) instead.
+
 ## InitContainers Support
 
 Each cluster can specify arbitrary init containers to run. These containers can
@@ -511,3 +890,140 @@ monitoring is outside the scope of operator responsibilities. See
 [configuration reference](reference/cluster_manifest.md) and
 [administrator documentation](administrator.md) for details on how backups are
 executed.
+
+## Connection pooler
+
+The operator can create a database-side connection pooler for applications
+where an application-side pooler is not feasible but the number of connections
+is high. To create a connection pooler together with a database, modify the
+manifest:
+
+```yaml
+spec:
+  enableConnectionPooler: true
+  enableReplicaConnectionPooler: true
+```
+
+This tells the operator to create connection poolers with default
+configuration, through which one can access the master via a separate service
+`{cluster-name}-pooler`. The first option creates a connection pooler for the
+master service, the second one for the replica service. Both flags are
+independent of each other, so each of them can be set or unset without any
+effect on the other.
+
+In most cases the
+[default configuration](reference/operator_parameters.md#connection-pooler-configuration)
+should be good enough. To configure a new connection pooler individually for
+each Postgres cluster, specify:
+
+```yaml
+spec:
+  connectionPooler:
+    # how many instances of connection pooler to create
+    numberOfInstances: 2
+
+    # in which mode to run, session or transaction
+    mode: "transaction"
+
+    # schema, which the operator will create in each database
+    # to install the credentials lookup function for the connection pooler
+    schema: "pooler"
+
+    # user, which the operator will create for the connection pooler
+    user: "pooler"
+
+    # resources for each instance
+    resources:
+      requests:
+        cpu: 500m
+        memory: 100Mi
+      limits:
+        cpu: "1"
+        memory: 100Mi
+```
+
+The `enableConnectionPooler` flag is not required when the `connectionPooler`
+section is present in the manifest. But it can be used to disable/remove the
+pooler while keeping its configuration.
+
+By default, [`PgBouncer`](https://www.pgbouncer.org/) is used as the connection pooler.
+To find out about pool modes, read the `PgBouncer` [docs](https://www.pgbouncer.org/config.html#pooler_mode)
+(the general approach should be similar across different implementations).
+
+Note that with `PgBouncer` a meaningful CPU limit is 1 core or less (there are
+ways to utilize more than one core, but in K8s it is usually easier to just
+spin up more instances).
+
+## Custom TLS certificates
+
+By default, the Spilo image generates its own TLS certificate during startup.
+However, this certificate cannot be verified and thus does not protect against
+active MITM attacks. In this section we show how to specify a custom TLS
+certificate which is mounted in the database pods via a K8s Secret.
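+
+If you just want to experiment with the feature, a self-signed certificate can
+be generated with `openssl`; for real protection use a certificate signed by a
+CA your clients trust. The CN below is only an example - use the DNS name your
+clients connect to:
+
+```sh
+openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
+  -keyout pg-tls.key -out pg-tls.crt \
+  -subj "/CN=acid-test-cluster"
+```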
+ +Before applying these changes, in k8s the operator must also be configured with +the `spilo_fsgroup` set to the GID matching the postgres user group. If you +don't know the value, use `103` which is the GID from the default Spilo image +(`spilo_fsgroup=103` in the cluster request spec). + +OpenShift allocates the users and groups dynamically (based on scc), and their +range is different in every namespace. Due to this dynamic behaviour, it's not +trivial to know at deploy time the uid/gid of the user in the cluster. +Therefore, instead of using a global `spilo_fsgroup` setting, use the +`spiloFSGroup` field per Postgres cluster. + +Upload the cert as a kubernetes secret: +```sh +kubectl create secret tls pg-tls \ + --key pg-tls.key \ + --cert pg-tls.crt +``` + +When doing client auth, CA can come optionally from the same secret: +```sh +kubectl create secret generic pg-tls \ + --from-file=tls.crt=server.crt \ + --from-file=tls.key=server.key \ + --from-file=ca.crt=ca.crt +``` + +Then configure the postgres resource with the TLS secret: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql + +metadata: + name: acid-test-cluster +spec: + tls: + secretName: "pg-tls" + caFile: "ca.crt" # add this if the secret is configured with a CA +``` + +Optionally, the CA can be provided by a different secret: +```sh +kubectl create secret generic pg-tls-ca \ + --from-file=ca.crt=ca.crt +``` + +Then configure the postgres resource with the TLS secret: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql + +metadata: + name: acid-test-cluster +spec: + tls: + secretName: "pg-tls" # this should hold tls.key and tls.crt + caSecretName: "pg-tls-ca" # this should hold ca.crt + caFile: "ca.crt" # add this if the secret is configured with a CA +``` + +Alternatively, it is also possible to use +[cert-manager](https://cert-manager.io/docs/) to generate these secrets. + +Certificate rotation is handled in the Spilo image which checks every 5 +minutes if the certificates have changed and reloads postgres accordingly. diff --git a/e2e/Dockerfile b/e2e/Dockerfile index 236942d04..3eb8c9d70 100644 --- a/e2e/Dockerfile +++ b/e2e/Dockerfile @@ -1,8 +1,12 @@ -FROM ubuntu:18.04 +# An image to run e2e tests. +# The image does not include the tests; all necessary files are bind-mounted when a container starts. 
+FROM ubuntu:20.04 LABEL maintainer="Team ACID @ Zalando " -COPY manifests ./manifests -COPY requirements.txt tests ./ +ENV TERM xterm-256color + +COPY requirements.txt ./ +COPY scm-source.json ./ RUN apt-get update \ && apt-get install --no-install-recommends -y \ @@ -10,14 +14,15 @@ RUN apt-get update \ python3-setuptools \ python3-pip \ curl \ + vim \ && pip3 install --no-cache-dir -r requirements.txt \ - && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl \ + && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -ARG VERSION=dev -RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" ./__init__.py - -CMD ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"] +# working line +# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests +ENTRYPOINT ["python3", "-m", "unittest"] +CMD ["discover","-v","--failfast","--start-directory","/tests"] \ No newline at end of file diff --git a/e2e/Makefile b/e2e/Makefile index 77059f3eb..b904ad763 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -1,6 +1,6 @@ .PHONY: clean copy docker push tools test -BINARY ?= postgres-operator-e2e-tests +BINARY ?= postgres-operator-e2e-tests-runner BUILD_FLAGS ?= -v CGO_ENABLED ?= 0 ifeq ($(RACE),1) @@ -34,15 +34,23 @@ copy: clean mkdir manifests cp ../manifests -r . -docker: copy - docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" . +docker: scm-source.json + docker build -t "$(IMAGE):$(TAG)" . + +scm-source.json: ../.git + echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json push: docker docker push "$(IMAGE):$(TAG)" -tools: docker +tools: # install pinned version of 'kind' - GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1 + # go get must run outside of a dir with a (module-based) Go project ! + # otherwise go get updates project's dependencies and/or behaves differently + cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0 -test: - ./run.sh +e2etest: tools copy clean + ./run.sh main + +cleanup: clean + ./run.sh cleanup \ No newline at end of file diff --git a/e2e/README.md b/e2e/README.md index f1bc5f9ed..5aa987593 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -12,6 +12,10 @@ Docker. Docker Go +# Notice + +The `manifest` folder in e2e tests folder is not commited to git, it comes from `/manifests` + ## Build test runner In the directory of the cloned Postgres Operator repository change to the e2e @@ -29,12 +33,78 @@ runtime. In the e2e folder you can invoke tests either with `make test` or with: ```bash -./run.sh +./run.sh main ``` To run both the build and test step you can invoke `make e2e` from the parent directory. +To run the end 2 end test and keep the kind state execute: +```bash +NOCLEANUP=True ./run.sh main +``` + +## Run individual test + +After having executed a normal E2E run with `NOCLEANUP=True` Kind still continues to run, allowing you subsequent test runs. 
+ +To run an individual test, run the following command in the `e2e` directory + +```bash +NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade +``` + +## Inspecting Kind + +If you want to inspect Kind/Kubernetes cluster, switch `kubeconfig` file and context +```bash +# save the old config in case you have it +export KUBECONFIG_SAVED=$KUBECONFIG + +# use the one created by e2e tests +export KUBECONFIG=/tmp/kind-config-postgres-operator-e2e-tests + +# this kubeconfig defines a single context +kubectl config use-context kind-postgres-operator-e2e-tests +``` + +or use the following script to exec into the K8s setup and then use `kubectl` + +```bash +./exec_into_env.sh + +# use kubectl +kubectl get pods + +# watch relevant objects +./scripts/watch_objects.sh + +# get operator logs +./scripts/get_logs.sh +``` + +If you want to inspect the state of the `kind` cluster manually with a single command, add a `context` flag +```bash +kubectl get pods --context kind-kind +``` +or set the context for a few commands at once + + + +## Cleaning up Kind + +To cleanup kind and start fresh + +```bash +e2e/run.sh cleanup +``` + +That also helps in case you see the +``` +ERROR: no nodes found for cluster "postgres-operator-e2e-tests" +``` +that happens when the `kind` cluster was deleted manually but its configuraiton file was not. + ## Covered use cases The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py): diff --git a/e2e/exec.sh b/e2e/exec.sh new file mode 100755 index 000000000..1ab666e5e --- /dev/null +++ b/e2e/exec.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +kubectl exec -i $1 -- sh -c "$2" diff --git a/e2e/exec_into_env.sh b/e2e/exec_into_env.sh new file mode 100755 index 000000000..ef12ba18a --- /dev/null +++ b/e2e/exec_into_env.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +export cluster_name="postgres-operator-e2e-tests" +export kubeconfig_path="/tmp/kind-config-${cluster_name}" +export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest" +export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \ + --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ + --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tests)",target=/tests \ + --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" diff --git a/e2e/kind-cluster-postgres-operator-e2e-tests.yaml b/e2e/kind-cluster-postgres-operator-e2e-tests.yaml index a59746fd3..752e993cd 100644 --- a/e2e/kind-cluster-postgres-operator-e2e-tests.yaml +++ b/e2e/kind-cluster-postgres-operator-e2e-tests.yaml @@ -1,5 +1,5 @@ kind: Cluster -apiVersion: kind.sigs.k8s.io/v1alpha3 +apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane - role: worker diff --git a/e2e/requirements.txt b/e2e/requirements.txt index 68a8775ff..4f6f5ac5f 100644 --- a/e2e/requirements.txt +++ b/e2e/requirements.txt @@ -1,3 +1,3 @@ -kubernetes==9.0.0 +kubernetes==11.0.0 timeout_decorator==0.4.1 -pyyaml==5.1 +pyyaml==5.3.1 diff --git a/e2e/run.sh b/e2e/run.sh index c7825bfd3..2d5708778 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -6,71 +6,86 @@ set -o nounset set -o pipefail IFS=$'\n\t' -cd $(dirname "$0"); - readonly cluster_name="postgres-operator-e2e-tests" 
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" +readonly spilo_image="registry.opensource.zalan.do/acid/spilo-13-e2e:0.3" +readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +export GOPATH=${GOPATH-~/go} +export PATH=${GOPATH}/bin:$PATH + +echo "Clustername: ${cluster_name}" +echo "Kubeconfig path: ${kubeconfig_path}" function pull_images(){ - operator_tag=$(git describe --tags --always --dirty) if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]] then docker pull registry.opensource.zalan.do/acid/postgres-operator:latest fi - if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:${operator_tag}) ]] - then - docker pull registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:latest - fi - operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1) - e2e_test_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests" --format "{{.Repository}}:{{.Tag}}" | head -1) } function start_kind(){ - + echo "Starting kind for e2e tests" # avoid interference with previous test runs if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]] then kind delete cluster --name ${cluster_name} fi - kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml + export KUBECONFIG="${kubeconfig_path}" + kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml + docker pull "${spilo_image}" + kind load docker-image "${spilo_image}" --name ${cluster_name} +} + +function load_operator_image() { + echo "Loading operator image" + export KUBECONFIG="${kubeconfig_path}" kind load docker-image "${operator_image}" --name ${cluster_name} - kind load docker-image "${e2e_test_image}" --name ${cluster_name} - KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})" - export KUBECONFIG } function set_kind_api_server_ip(){ + echo "Setting up kind API server ip" # use the actual kubeconfig to connect to the 'kind' API server # but update the IP address of the API server to the one from the Docker 'bridge' network - cp "${KUBECONFIG}" /tmp readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase - readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane) + readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane) sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" } function run_tests(){ + echo "Running tests... 
image: ${e2e_test_runner_image}" + # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile - docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}" + docker run --rm --network=host -e "TERM=xterm-256color" \ + --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ + --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tests)",target=/tests \ + --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@ } -function clean_up(){ +function cleanup(){ + echo "Executing cleanup" unset KUBECONFIG kind delete cluster --name ${cluster_name} rm -rf ${kubeconfig_path} } function main(){ - - trap "clean_up" QUIT TERM EXIT - + echo "Entering main function..." + [[ -z ${NOCLEANUP-} ]] && trap "cleanup" QUIT TERM EXIT pull_images - start_kind + [[ ! -f ${kubeconfig_path} ]] && start_kind + load_operator_image set_kind_api_server_ip - run_tests + + shift + run_tests $@ exit 0 } -main "$@" +"$1" $@ diff --git a/e2e/scripts/cleanup.sh b/e2e/scripts/cleanup.sh new file mode 100755 index 000000000..2c82388ae --- /dev/null +++ b/e2e/scripts/cleanup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +kubectl delete postgresql acid-minimal-cluster +kubectl delete deployments -l application=db-connection-pooler,cluster-name=acid-minimal-cluster +kubectl delete statefulsets -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete services -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete configmap postgres-operator +kubectl delete deployment postgres-operator \ No newline at end of file diff --git a/e2e/scripts/get_logs.sh b/e2e/scripts/get_logs.sh new file mode 100755 index 000000000..1639f3995 --- /dev/null +++ b/e2e/scripts/get_logs.sh @@ -0,0 +1,2 @@ +#!/bin/bash +kubectl logs $(kubectl get pods -l name=postgres-operator --field-selector status.phase=Running -o jsonpath='{.items..metadata.name}') diff --git a/e2e/scripts/watch_objects.sh b/e2e/scripts/watch_objects.sh new file mode 100755 index 000000000..4c9b82404 --- /dev/null +++ b/e2e/scripts/watch_objects.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +watch -c " +kubectl get postgresql --all-namespaces +echo +echo -n 'Rolling upgrade pending: ' +kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}' +echo +echo +echo 'Pods' +kubectl get pods -l application=spilo -o wide --all-namespaces +echo +kubectl get pods -l application=db-connection-pooler -o wide --all-namespaces +echo +echo 'Statefulsets' +kubectl get statefulsets --all-namespaces +echo +echo 'Deployments' +kubectl get deployments --all-namespaces -l application=db-connection-pooler +kubectl get deployments --all-namespaces -l application=postgres-operator +echo +echo +echo 'Step from operator deployment' +kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}' +echo +echo +echo 'Spilo Image in statefulset' +kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}' +echo +echo +echo 'Queue Status' +kubectl exec -it \$(kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.name}') -- curl localhost:8080/workers/all/status/ +echo" \ No newline at 
end of file diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py new file mode 100644 index 000000000..95e1dc9ad --- /dev/null +++ b/e2e/tests/k8s_api.py @@ -0,0 +1,532 @@ +import json +import time +import subprocess +import warnings + +from kubernetes import client, config +from kubernetes.client.rest import ApiException + + +def to_selector(labels): + return ",".join(["=".join(lbl) for lbl in labels.items()]) + + +class K8sApi: + + def __init__(self): + + # https://github.com/kubernetes-client/python/issues/309 + warnings.simplefilter("ignore", ResourceWarning) + + self.config = config.load_kube_config() + self.k8s_client = client.ApiClient() + + self.core_v1 = client.CoreV1Api() + self.apps_v1 = client.AppsV1Api() + self.batch_v1_beta1 = client.BatchV1beta1Api() + self.custom_objects_api = client.CustomObjectsApi() + self.policy_v1_beta1 = client.PolicyV1beta1Api() + self.storage_v1_api = client.StorageV1Api() + + +class K8s: + ''' + Wraps around K8s api client and helper methods. + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels = labels + self.namespace = namespace + + def get_pg_nodes(self, pg_cluster_name, namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + # give operator time to subscribe to objects + time.sleep(1) + return True + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + pods = list(filter(lambda x: x.status.phase == 'Running', pods)) + + if len(pods): + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def pg_get_status(self, name="acid-minimal-cluster", namespace="default"): + pg = self.api.custom_objects_api.get_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name) + return pg.get("status", {}).get("PostgresClusterStatus", None) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + 
for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase == 'Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in 
failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator pod"): + # patching the pod template in the deployment restarts the operator pod + self.api.apps_v1.patch_namespaced_deployment("postgres-operator", "default", {"spec": {"template": {"metadata": {"annotations": {"step": "{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"): + self.api.apps_v1.patch_namespaced_stateful_set(name, namespace, data) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": + return [] + return json.loads(r.stdout.decode()) + + def get_operator_state(self): + pod = self.get_operator_pod() + if pod is None: + return None + pod = pod.metadata.name + + r = self.exec_with_kubectl(pod, "curl localhost:8080/workers/all/status/") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{": + return None + + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod="acid-minimal-cluster-0"): + result = self.get_patroni_state(pod) + return list(filter(lambda x: "State" in x and x["State"] == "running", result)) + + def get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"): + try: + deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace) + return deployment.spec.replicas + except ApiException: + return None + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. 
+ ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'): + labels = { + 'application': 'spilo', + 'cluster-name': pg_cluster_name, + 'spilo-role': 'master', + } + + pods = self.api.core_v1.list_namespaced_pod( + namespace, label_selector=to_selector(labels)).items + + if pods: + return pods[0] + + +class K8sBase: + ''' + K8s basic API wrapper class supposed to be inherited by other more specific classes for e2e tests + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels = labels + self.namespace = namespace + + def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + if pods: + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotation)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + 
for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotation)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase == 'Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + 
self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator deplyment"): + self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": + return [] + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod): + result = self.get_patroni_state(pod) + return list(filter(lambda x: x["State"] == "running", result)) + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. + ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + +""" + Inspiriational classes towards easier writing of end to end tests with one cluster per test case +""" + + +class K8sOperator(K8sBase): + def __init__(self, labels="name=postgres-operator", namespace="default"): + super().__init__(labels, namespace) + + +class K8sPostgres(K8sBase): + def __init__(self, labels="cluster-name=acid-minimal-cluster", namespace="default"): + super().__init__(labels, namespace) + + def get_pg_nodes(self): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 12106601e..ecc0b2327 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1,12 +1,30 @@ +import json import unittest import time import timeout_decorator -import subprocess -import warnings import os import yaml -from kubernetes import client, config +from datetime import datetime +from kubernetes import client + +from tests.k8s_api import K8s +from kubernetes.client.rest import ApiException + +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.3" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.4" + + +def to_selector(labels): + return ",".join(["=".join(lbl) for lbl in 
labels.items()]) + + +def clean_list(values): + # value is not stripped bytes, strip and convert to a string + clean = lambda v: v.strip().decode() + notNone = lambda v: v + + return list(filter(notNone, map(clean, values))) class EndToEndTestCase(unittest.TestCase): @@ -17,6 +35,41 @@ class EndToEndTestCase(unittest.TestCase): # `kind` pods may stuck in the `Terminating` phase for a few minutes; hence high test timeout TEST_TIMEOUT_SEC = 600 + def eventuallyEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyNotEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertNotEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyTrue(self, f, m, retries=60, interval=2): + while True: + try: + self.assertTrue(f(), m) + return True + except AssertionError: + retries = retries - 1 + if not retries > 0: + raise + time.sleep(interval) + @classmethod @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def setUpClass(cls): @@ -28,219 +81,507 @@ class EndToEndTestCase(unittest.TestCase): In the case of test failure the cluster will stay to enable manual examination; next invocation of "make test" will re-create it. ''' + print("Test Setup being executed") # set a single K8s wrapper for all tests k8s = cls.k8s = K8s() + # remove existing local storage class and create hostpath class + try: + k8s.api.storage_v1_api.delete_storage_class("standard") + except ApiException as e: + print("Failed to delete the 'standard' storage class: {0}".format(e)) + # operator deploys pod service account there on start up # needed for test_multi_namespace_support() - cls.namespace = "test" - v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace)) - k8s.api.core_v1.create_namespace(v1_namespace) + cls.test_namespace = "test" + try: + v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace)) + k8s.api.core_v1.create_namespace(v1_namespace) + except ApiException as e: + print("Failed to create the '{0}' namespace: {1}".format(cls.test_namespace, e)) # submit the most recent operator image built on the Docker host with open("manifests/postgres-operator.yaml", 'r+') as f: operator_deployment = yaml.safe_load(f) operator_deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ['OPERATOR_IMAGE'] + + with open("manifests/postgres-operator.yaml", 'w') as f: yaml.dump(operator_deployment, f, Dumper=yaml.Dumper) + with open("manifests/configmap.yaml", 'r+') as f: + configmap = yaml.safe_load(f) + configmap["data"]["workers"] = "1" + configmap["data"]["docker_image"] = SPILO_CURRENT + + with open("manifests/configmap.yaml", 'w') as f: + yaml.dump(configmap, f, Dumper=yaml.Dumper) + for filename in ["operator-service-account-rbac.yaml", + "postgresql.crd.yaml", + "operatorconfiguration.crd.yaml", + "postgresteam.crd.yaml", "configmap.yaml", - "postgres-operator.yaml"]: - k8s.create_with_kubectl("manifests/" + filename) + "postgres-operator.yaml", + "api-service.yaml", + "infrastructure-roles.yaml", + "infrastructure-roles-new.yaml", + "e2e-storage-class.yaml"]: + result = k8s.create_with_kubectl("manifests/" + filename) + print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) k8s.wait_for_operator_pod_start() + # reset 
taints and tolerations + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker", {"spec": {"taints": []}}) + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker2", {"spec": {"taints": []}}) + + # make sure we start a new operator on every new run, + # this tackles the problem when kind is reused + # and the Docker image is in fact changed (dirty one) + + k8s.update_config({}, step="TestSuite Startup") + actual_operator_image = k8s.api.core_v1.list_namespaced_pod( 'default', label_selector='name=postgres-operator').items[0].spec.containers[0].image print("Tested operator image: {}".format(actual_operator_image)) # shows up after tests finish - k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") - k8s.wait_for_pod_start('spilo-role=master') + result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") + print('stdout: {}, stderr: {}'.format(result.stdout, result.stderr)) + try: + k8s.wait_for_pod_start('spilo-role=master') + k8s.wait_for_pod_start('spilo-role=replica') + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_overwrite_pooler_deployment(self): + self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml") + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1, + "Initial broken deployment not rolled out") + + self.k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': True + } + }) + + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2, + "Operator did not succeed in overwriting labels") + + self.k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': False + } + }) + + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), + 0, "Pooler pods not scaled down") + + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_enable_disable_connection_pooler(self): + ''' + For a database without connection pooler, then turns it on, scale up, + turn off and on again. Test with different ways of doing this (via + enableConnectionPooler or connectionPooler configuration section). At + the end turn connection pooler off to not interfere with other tests. 
+ ''' + k8s = self.k8s + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': True, + 'enableReplicaConnectionPooler': True, + } + }) + + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2, + "Deployment replicas is 2 default") + self.eventuallyEqual(lambda: k8s.count_running_pods( + "connection-pooler=acid-minimal-cluster-pooler"), + 2, "No pooler pods found") + self.eventuallyEqual(lambda: k8s.count_running_pods( + "connection-pooler=acid-minimal-cluster-pooler-repl"), + 2, "No pooler replica pods found") + self.eventuallyEqual(lambda: k8s.count_services_with_label( + 'application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 2, "No pooler service found") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 1, "Pooler secret not created") + + # Turn off only master connection pooler + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': False, + 'enableReplicaConnectionPooler': True, + } + }) + + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, + "Operator does not get in sync") + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler-repl"), 2, + "Deployment replicas is 2 default") + self.eventuallyEqual(lambda: k8s.count_running_pods( + "connection-pooler=acid-minimal-cluster-pooler"), + 0, "Master pooler pods not deleted") + self.eventuallyEqual(lambda: k8s.count_running_pods( + "connection-pooler=acid-minimal-cluster-pooler-repl"), + 2, "Pooler replica pods not found") + self.eventuallyEqual(lambda: k8s.count_services_with_label( + 'application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 1, "No pooler service found") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 1, "Secret not created") + + # Turn off only replica connection pooler + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': True, + 'enableReplicaConnectionPooler': False, + } + }) + + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, + "Operator does not get in sync") + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2, + "Deployment replicas is 2 default") + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), + 2, "Master pooler pods not found") + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler-repl"), + 0, "Pooler replica pods not deleted") + self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 1, "No pooler service found") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 1, "Secret not created") + + # scale up connection pooler deployment + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + 
{ + 'spec': { + 'connectionPooler': { + 'numberOfInstances': 3, + }, + } + }) + + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3, + "Deployment replicas is scaled to 3") + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), + 3, "Scale up of pooler pods does not work") + + # turn it off, keeping config should be overwritten by false + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': False, + 'enableReplicaConnectionPooler': False, + } + }) + + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), + 0, "Pooler pods not scaled down") + self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), + 0, "Pooler service not removed") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'), + 4, "Secrets not deleted") + + # Verify that all the databases have pooler schema installed. + # Do this via psql, since otherwise we need to deal with + # credentials. + dbList = [] + + leader = k8s.get_cluster_leader_pod('acid-minimal-cluster') + dbListQuery = "select datname from pg_database" + schemasQuery = """ + select schema_name + from information_schema.schemata + where schema_name = 'pooler' + """ + exec_query = r"psql -tAq -c \"{}\" -d {}" + + if leader: + try: + q = exec_query.format(dbListQuery, "postgres") + q = "su postgres -c \"{}\"".format(q) + print('Get databases: {}'.format(q)) + result = k8s.exec_with_kubectl(leader.metadata.name, q) + dbList = clean_list(result.stdout.split(b'\n')) + print('dbList: {}, stdout: {}, stderr {}'.format( + dbList, result.stdout, result.stderr + )) + except Exception as ex: + print('Could not get databases: {}'.format(ex)) + print('Stdout: {}'.format(result.stdout)) + print('Stderr: {}'.format(result.stderr)) + + for db in dbList: + if db in ('template0', 'template1'): + continue + + schemas = [] + try: + q = exec_query.format(schemasQuery, db) + q = "su postgres -c \"{}\"".format(q) + print('Get schemas: {}'.format(q)) + result = k8s.exec_with_kubectl(leader.metadata.name, q) + schemas = clean_list(result.stdout.split(b'\n')) + print('schemas: {}, stdout: {}, stderr {}'.format( + schemas, result.stdout, result.stderr + )) + except Exception as ex: + print('Could not get databases: {}'.format(ex)) + print('Stdout: {}'.format(result.stdout)) + print('Stderr: {}'.format(result.stderr)) + + self.assertNotEqual(len(schemas), 0) + else: + print('Could not find leader pod') + + # remove config section to make test work next time + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'connectionPooler': None, + 'EnableReplicaConnectionPooler': False, + } + }) @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_enable_load_balancer(self): ''' - Test if services are updated when enabling/disabling load balancers + Test if services are updated when enabling/disabling load balancers in Postgres manifest ''' k8s = self.k8s - cluster_label = 'cluster-name=acid-minimal-cluster' + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}' - # enable load balancer services - pg_patch_enable_lbs = { - "spec": { - "enableMasterLoadBalancer": True, - "enableReplicaLoadBalancer": True 
- } - } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - # wait for service recreation - time.sleep(60) + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'ClusterIP', + "Expected ClusterIP type initially, found {}") - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for master, found {}".format(master_svc_type)) - - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) - - # disable load balancer services again - pg_patch_disable_lbs = { - "spec": { - "enableMasterLoadBalancer": False, - "enableReplicaLoadBalancer": False - } - } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - # wait for service recreation - time.sleep(60) - - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'ClusterIP', - "Expected ClusterIP service type for master, found {}".format(master_svc_type)) - - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'ClusterIP', - "Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) - - @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_min_resource_limits(self): - ''' - Lower resource limits below configured minimum and let operator fix it - ''' - k8s = self.k8s - cluster_label = 'cluster-name=acid-minimal-cluster' - _, failover_targets = k8s.get_pg_nodes(cluster_label) - - # configure minimum boundaries for CPU and memory limits - minCPULimit = '500m' - minMemoryLimit = '500Mi' - patch_min_resource_limits = { - "data": { - "min_cpu_limit": minCPULimit, - "min_memory_limit": minMemoryLimit - } - } - k8s.update_config(patch_min_resource_limits) - - # lower resource limits below minimum - pg_patch_resources = { - "spec": { - "resources": { - "requests": { - "cpu": "10m", - "memory": "50Mi" - }, - "limits": { - "cpu": "200m", - "memory": "200Mi" - } + try: + # enable load balancer services + pg_patch_enable_lbs = { + "spec": { + "enableMasterLoadBalancer": True, + "enableReplicaLoadBalancer": True } } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("replica")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") + + # disable load balancer services again + pg_patch_disable_lbs = { + "spec": { + "enableMasterLoadBalancer": False, + "enableReplicaLoadBalancer": False + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") + + self.eventuallyEqual(lambda: 
k8s.get_service_type(cluster_label.format("replica")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_infrastructure_roles(self): + ''' + Test using external secrets for infrastructure roles + ''' + k8s = self.k8s + # update infrastructure roles description + secret_name = "postgresql-infrastructure-roles" + roles = "secretname: postgresql-infrastructure-roles-new, userkey: user,"\ + "rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon" + patch_infrastructure_roles = { + "data": { + "infrastructure_roles_secret_name": secret_name, + "infrastructure_roles_secrets": roles, + }, } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources) - k8s.wait_for_master_failover(failover_targets) + k8s.update_config(patch_infrastructure_roles) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, + "Operator does not get in sync") - pods = k8s.api.core_v1.list_namespaced_pod( - 'default', label_selector='spilo-role=master,' + cluster_label).items - self.assert_master_is_unique() - masterPod = pods[0] + try: + # check that new roles are represented in the config by requesting the + # operator configuration via API - self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit, - "Expected CPU limit {}, found {}" - .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu'])) - self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit, - "Expected memory limit {}, found {}" - .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory'])) + def verify_role(): + try: + operator_pod = k8s.get_operator_pod() + get_config_cmd = "wget --quiet -O - localhost:8080/config" + result = k8s.exec_with_kubectl(operator_pod.metadata.name, + get_config_cmd) + try: + roles_dict = (json.loads(result.stdout) + .get("controller", {}) + .get("InfrastructureRoles")) + except: + return False + + if "robot_zmon_acid_monitoring_new" in roles_dict: + role = roles_dict["robot_zmon_acid_monitoring_new"] + role.pop("Password", None) + self.assertDictEqual(role, { + "Name": "robot_zmon_acid_monitoring_new", + "Flags": None, + "MemberOf": ["robot_zmon"], + "Parameters": None, + "AdminRole": "", + "Origin": 2, + }) + return True + except: + pass + + return False + + self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_multi_namespace_support(self): + def test_lazy_spilo_upgrade(self): ''' - Create a customized Postgres cluster in a non-default namespace. + Test lazy upgrade for the Spilo image: operator changes a stateful set + but lets pods run with the old image until they are recreated for + reasons other than operator's activity. That works because the operator + configures stateful sets to use "onDelete" pod update policy. 
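+ With "onDelete" the StatefulSet controller never replaces running pods on its own, so a pod only picks up the new image once it is deleted and recreated.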
+ + The test covers: + 1) enabling lazy upgrade in existing operator deployment + 2) forcing the normal rolling upgrade by changing the operator + configmap and restarting its pod ''' + k8s = self.k8s - with open("manifests/complete-postgres-manifest.yaml", 'r+') as f: - pg_manifest = yaml.safe_load(f) - pg_manifest["metadata"]["namespace"] = self.namespace - yaml.dump(pg_manifest, f, Dumper=yaml.Dumper) + pod0 = 'acid-minimal-cluster-0' + pod1 = 'acid-minimal-cluster-1' - k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml") - k8s.wait_for_pod_start("spilo-role=master", self.namespace) - self.assert_master_is_unique(self.namespace, "acid-test-cluster") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") - @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_scaling(self): - ''' - Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. - ''' - k8s = self.k8s - labels = "cluster-name=acid-minimal-cluster" - - k8s.wait_for_pg_to_scale(3) - self.assertEqual(3, k8s.count_pods_with_label(labels)) - self.assert_master_is_unique() - - k8s.wait_for_pg_to_scale(2) - self.assertEqual(2, k8s.count_pods_with_label(labels)) - self.assert_master_is_unique() - - @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_taint_based_eviction(self): - ''' - Add taint "postgres=:NoExecute" to node with master. This must cause a failover. - ''' - k8s = self.k8s - cluster_label = 'cluster-name=acid-minimal-cluster' - - # get nodes of master and replica(s) (expected target of new master) - current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label) - num_replicas = len(failover_targets) - - # if all pods live on the same node, failover will happen to other worker(s) - failover_targets = [x for x in failover_targets if x != current_master_node] - if len(failover_targets) == 0: - nodes = k8s.api.core_v1.list_node() - for n in nodes.items: - if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != current_master_node: - failover_targets.append(n.metadata.name) - - # taint node with postgres=:NoExecute to force failover - body = { - "spec": { - "taints": [ - { - "effect": "NoExecute", - "key": "postgres" - } - ] + patch_lazy_spilo_upgrade = { + "data": { + "docker_image": SPILO_CURRENT, + "enable_lazy_spilo_upgrade": "false" } } + k8s.update_config(patch_lazy_spilo_upgrade, + step="Init baseline image version") - # patch node and test if master is failing over to one of the expected nodes - k8s.api.core_v1.patch_node(current_master_node, body) - k8s.wait_for_master_failover(failover_targets) - k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), SPILO_CURRENT, + "Statefulset not updated initially") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") - new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label) - self.assertNotEqual(current_master_node, new_master_node, - "Master on {} did not fail over to one of {}".format(current_master_node, failover_targets)) - self.assertEqual(num_replicas, len(new_replica_nodes), - "Expected {} replicas, found {}".format(num_replicas, len(new_replica_nodes))) - self.assert_master_is_unique() + 
self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + SPILO_CURRENT, "Rolling upgrade was not executed") + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), + SPILO_CURRENT, "Rolling upgrade was not executed") - # undo the tainting - body = { - "spec": { - "taints": [] + # update docker image in config and enable the lazy upgrade + conf_image = SPILO_LAZY + patch_lazy_spilo_upgrade = { + "data": { + "docker_image": conf_image, + "enable_lazy_spilo_upgrade": "true" } } - k8s.api.core_v1.patch_node(new_master_node, body) + k8s.update_config(patch_lazy_spilo_upgrade, + step="patch image and lazy upgrade") + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), conf_image, + "Statefulset not updated to next Docker image") + + try: + # restart the pod to get a container with the new image + k8s.api.core_v1.delete_namespaced_pod(pod0, 'default') + + # verify only pod-0 which was deleted got new image from statefulset + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + conf_image, "Delete pod-0 did not get new spilo image") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, + "No two pods running after lazy rolling upgrade") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") + self.assertNotEqual(lambda: k8s.get_effective_pod_image(pod1), + SPILO_CURRENT, + "pod-1 should not have change Docker image to {}".format(SPILO_CURRENT)) + + # clean up + unpatch_lazy_spilo_upgrade = { + "data": { + "enable_lazy_spilo_upgrade": "false", + } + } + k8s.update_config(unpatch_lazy_spilo_upgrade, step="patch lazy upgrade") + + # at this point operator will complete the normal rolling upgrade + # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), + conf_image, "Rolling upgrade was not executed", + 50, 3) + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), + conf_image, "Rolling upgrade was not executed", + 50, 3) + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), + 2, "Postgres status did not enter running") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_logical_backup_cron_job(self): @@ -266,50 +607,211 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup) - k8s.wait_for_logical_backup_job_creation() - jobs = k8s.get_logical_backup_job().items - self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs))) + try: + self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 1, "failed to create logical backup job") - job = jobs[0] - self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster", - "Expected job name {}, found {}" - .format("logical-backup-acid-minimal-cluster", job.metadata.name)) - self.assertEqual(job.spec.schedule, schedule, - "Expected {} schedule, found {}" - .format(schedule, job.spec.schedule)) + job = k8s.get_logical_backup_job().items[0] + self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster", + "Expected job name {}, found {}" + .format("logical-backup-acid-minimal-cluster", job.metadata.name)) + self.assertEqual(job.spec.schedule, schedule, + "Expected {} schedule, found {}" + 
.format(schedule, job.spec.schedule)) - # update the cluster-wide image of the logical backup pod - image = "test-image-name" - patch_logical_backup_image = { + # update the cluster-wide image of the logical backup pod + image = "test-image-name" + patch_logical_backup_image = { + "data": { + "logical_backup_docker_image": image, + } + } + k8s.update_config(patch_logical_backup_image, step="patch logical backup image") + + def get_docker_image(): + jobs = k8s.get_logical_backup_job().items + return jobs[0].spec.job_template.spec.template.spec.containers[0].image + + self.eventuallyEqual(get_docker_image, image, + "Expected job image {}, found {}".format(image, "{}")) + + # delete the logical backup cron job + pg_patch_disable_backup = { + "spec": { + "enableLogicalBackup": False, + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup) + + self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 0, "failed to create logical backup job") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + # ensure cluster is healthy after tests + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_min_resource_limits(self): + ''' + Lower resource limits below configured minimum and let operator fix it + ''' + k8s = self.k8s + # self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Cluster not healthy at start") + + # configure minimum boundaries for CPU and memory limits + minCPULimit = '503m' + minMemoryLimit = '502Mi' + + patch_min_resource_limits = { "data": { - "logical_backup_docker_image": image, + "min_cpu_limit": minCPULimit, + "min_memory_limit": minMemoryLimit } } - k8s.update_config(patch_logical_backup_image) - jobs = k8s.get_logical_backup_job().items - actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image - self.assertEqual(actual_image, image, - "Expected job image {}, found {}".format(image, actual_image)) - - # delete the logical backup cron job - pg_patch_disable_backup = { + # lower resource limits below minimum + pg_patch_resources = { "spec": { - "enableLogicalBackup": False, + "resources": { + "requests": { + "cpu": "10m", + "memory": "50Mi" + }, + "limits": { + "cpu": "200m", + "memory": "200Mi" + } + } } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup) - k8s.wait_for_logical_backup_job_deletion() - jobs = k8s.get_logical_backup_job().items - self.assertEqual(0, len(jobs), - "Expected 0 logical backup jobs, found {}".format(len(jobs))) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources) + + k8s.patch_statefulset({"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}}) + k8s.update_config(patch_min_resource_limits, "Minimum resource test") + + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running") + + def verify_pod_limits(): + pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items + if len(pods) < 
2:
+                return False
+
+            r = pods[0].spec.containers[0].resources.limits['memory'] == minMemoryLimit
+            r = r and pods[0].spec.containers[0].resources.limits['cpu'] == minCPULimit
+            r = r and pods[1].spec.containers[0].resources.limits['memory'] == minMemoryLimit
+            r = r and pods[1].spec.containers[0].resources.limits['cpu'] == minCPULimit
+            return r
+
+        self.eventuallyTrue(verify_pod_limits, "Pod limits were not adjusted")
+
+    @classmethod
+    def setUp(cls):
+        # cls.k8s.update_config({}, step="Setup")
+        cls.k8s.patch_statefulset({"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}})
+        pass
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_multi_namespace_support(self):
+        '''
+        Create a customized Postgres cluster in a non-default namespace.
+        '''
+        k8s = self.k8s
+
+        with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
+            pg_manifest = yaml.safe_load(f)
+            pg_manifest["metadata"]["namespace"] = self.test_namespace
+            yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
+
+        try:
+            k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
+            k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+        finally:
+            # delete the new cluster so that the k8s_api.get_operator_state works correctly in subsequent tests
+            # ideally we should delete the 'test' namespace here, but
+            # the pods inside the namespace get stuck in the Terminating state, making the test time out
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
+            time.sleep(5)
+
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_zz_node_readiness_label(self):
+        '''
+        Remove node readiness label from master node. This must cause a failover.
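+        Nodes that lack the configured readiness label are treated by the operator as not ready, so the master is expected to move to one of the labeled nodes.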
+ ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + readiness_label = 'lifecycle-status' + readiness_value = 'ready' + + try: + # get nodes of master and replica(s) (expected target of new master) + current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label) + num_replicas = len(current_replica_nodes) + failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes) + + # add node_readiness_label to potential failover nodes + patch_readiness_label = { + "metadata": { + "labels": { + readiness_label: readiness_value + } + } + } + self.assertTrue(len(failover_targets) > 0, "No failover targets available") + for failover_target in failover_targets: + k8s.api.core_v1.patch_node(failover_target, patch_readiness_label) + + # define node_readiness_label in config map which should trigger a failover of the master + patch_readiness_label_config = { + "data": { + "node_readiness_label": readiness_label + ':' + readiness_value, + } + } + k8s.update_config(patch_readiness_label_config, "setting readiness label") + new_master_node, new_replica_nodes = self.assert_failover( + current_master_node, num_replicas, failover_targets, cluster_label) + + # patch also node where master ran before + k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label) + + # toggle pod anti affinity to move replica away from master node + self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_scaling(self): + ''' + Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. + ''' + k8s = self.k8s + pod = "acid-minimal-cluster-0" + + k8s.scale_cluster(3) + self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy") + + k8s.scale_cluster(2) + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all members 2 healthy") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_service_annotations(self): ''' - Create a Postgres cluster with service annotations and check them. + Create a Postgres cluster with service annotations and check them. 
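+ Annotations from the operator config (custom_service_annotations) and from the cluster manifest (serviceAnnotations) should end up merged on both the master and the replica service.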
''' k8s = self.k8s patch_custom_service_annotations = { @@ -322,7 +824,8 @@ class EndToEndTestCase(unittest.TestCase): pg_patch_custom_annotations = { "spec": { "serviceAnnotations": { - "annotation.key": "value" + "annotation.key": "value", + "alice": "bob", } } } @@ -332,11 +835,11 @@ class EndToEndTestCase(unittest.TestCase): annotations = { "annotation.key": "value", "foo": "bar", + "alice": "bob" } - self.assertTrue(k8s.check_service_annotations( - "cluster-name=acid-service-annotations,spilo-role=master", annotations)) - self.assertTrue(k8s.check_service_annotations( - "cluster-name=acid-service-annotations,spilo-role=replica", annotations)) + + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations") + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations") # clean up unpatch_custom_service_annotations = { @@ -346,140 +849,356 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(unpatch_custom_service_annotations) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_statefulset_annotation_propagation(self): + ''' + Inject annotation to Postgresql CRD and check it's propagation to stateful set + ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + + patch_sset_propagate_annotations = { + "data": { + "downscaler_annotations": "deployment-time,downscaler/*", + "inherited_annotations": "owned-by", + } + } + k8s.update_config(patch_sset_propagate_annotations) + + pg_crd_annotations = { + "metadata": { + "annotations": { + "deployment-time": "2020-04-30 12:00:00", + "downscaler/downtime_replicas": "0", + "owned-by": "acid", + }, + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations) + + annotations = { + "deployment-time": "2020-04-30 12:00:00", + "downscaler/downtime_replicas": "0", + "owned-by": "acid", + } + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing") + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + @unittest.skip("Skipping this test until fixed") + def test_zzz_taint_based_eviction(self): + ''' + Add taint "postgres=:NoExecute" to node with master. This must cause a failover. 
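+ The NoExecute taint evicts pods without a matching toleration from the tainted node, and Patroni then promotes a replica running on another node.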
+ ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + + # verify we are in good state from potential previous tests + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + + # get nodes of master and replica(s) (expected target of new master) + master_nodes, replica_nodes = k8s.get_cluster_nodes() + + self.assertNotEqual(master_nodes, []) + self.assertNotEqual(replica_nodes, []) + + # taint node with postgres=:NoExecute to force failover + body = { + "spec": { + "taints": [ + { + "effect": "NoExecute", + "key": "postgres" + } + ] + } + } + + k8s.api.core_v1.patch_node(master_nodes[0], body) + self.eventuallyTrue(lambda: k8s.get_cluster_nodes()[0], replica_nodes) + self.assertNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes) + + # add toleration to pods + patch_toleration_config = { + "data": { + "toleration": "key:postgres,operator:Exists,effect:NoExecute" + } + } + + k8s.update_config(patch_toleration_config, step="allow tainted nodes") + + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + + # toggle pod anti affinity to move replica away from master node + nm, new_replica_nodes = k8s.get_cluster_nodes() + new_master_node = nm[0] + self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_node_affinity(self): + ''' + Add label to a node and update postgres cluster spec to deploy only on a node with that label + ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + + # verify we are in good state from potential previous tests + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # get nodes of master and replica(s) + master_node, replica_nodes = k8s.get_pg_nodes(cluster_label) + + self.assertNotEqual(master_node, []) + self.assertNotEqual(replica_nodes, []) + + # label node with environment=postgres + node_label_body = { + "metadata": { + "labels": { + "node-affinity-test": "postgres" + } + } + } + + try: + # patch current master node with the label + print('patching master node: {}'.format(master_node)) + k8s.api.core_v1.patch_node(master_node, node_label_body) + + # add node affinity to cluster + patch_node_affinity_config = { + "spec": { + "nodeAffinity" : { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "node-affinity-test", + "operator": "In", + "values": [ + "postgres" + ] + } + ] + } + ] + } + } + } + } + + k8s.api.custom_objects_api.patch_namespaced_custom_object( + group="acid.zalan.do", + version="v1", + namespace="default", + plural="postgresqls", + name="acid-minimal-cluster", + body=patch_node_affinity_config) + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # node affinity change should cause replica to relocate from replica node to master node due to node affinity 
requirement + k8s.wait_for_pod_failover(master_node, 'spilo-role=replica,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'replica': + self.assertEqual(master_node, pod.spec.node_name, + "Sanity check: expected replica to relocate to master node {}, but found on {}".format(master_node, pod.spec.node_name)) + + # check that pod has correct node affinity + key = pod.spec.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms[0].match_expressions[0].key + value = pod.spec.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms[0].match_expressions[0].values[0] + self.assertEqual("node-affinity-test", key, + "Sanity check: expect node selector key to be equal to 'node-affinity-test' but got {}".format(key)) + self.assertEqual("postgres", value, + "Sanity check: expect node selector value to be equal to 'postgres' but got {}".format(value)) + + patch_node_remove_affinity_config = { + "spec": { + "nodeAffinity" : None + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + group="acid.zalan.do", + version="v1", + namespace="default", + plural="postgresqls", + name="acid-minimal-cluster", + body=patch_node_remove_affinity_config) + self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # remove node affinity to move replica away from master node + nm, new_replica_nodes = k8s.get_cluster_nodes() + new_master_node = nm[0] + self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_zzzz_cluster_deletion(self): + ''' + Test deletion with configured protection + ''' + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + + # configure delete protection + patch_delete_annotations = { + "data": { + "delete_annotation_date_key": "delete-date", + "delete_annotation_name_key": "delete-clustername" + } + } + k8s.update_config(patch_delete_annotations) + time.sleep(25) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + try: + # this delete attempt should be omitted because of missing annotations + k8s.api.custom_objects_api.delete_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") + time.sleep(15) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # check that pods and services are still there + k8s.wait_for_running_pods(cluster_label, 2) + k8s.wait_for_service(cluster_label) + + # recreate Postgres cluster resource + k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml") + + # wait a little before proceeding + time.sleep(10) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # add annotations to manifest + delete_date = datetime.today().strftime('%Y-%m-%d') + pg_patch_delete_annotations = { + "metadata": { + "annotations": { + "delete-date": delete_date, + "delete-clustername": "acid-minimal-cluster", + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", 
"default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # wait a little before proceeding + time.sleep(20) + k8s.wait_for_running_pods(cluster_label, 2) + k8s.wait_for_service(cluster_label) + + # now delete process should be triggered + k8s.api.custom_objects_api.delete_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") + + self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, "Manifest not deleted") + + # check if everything has been deleted + self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted") + self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Service not deleted") + self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted") + self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") + self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") + self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets not deleted") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + + # reset configmap + patch_delete_annotations = { + "data": { + "delete_annotation_date_key": "", + "delete_annotation_name_key": "" + } + } + k8s.update_config(patch_delete_annotations) + + def get_failover_targets(self, master_node, replica_nodes): + ''' + If all pods live on the same node, failover will happen to other worker(s) + ''' + k8s = self.k8s + k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane' + + failover_targets = [x for x in replica_nodes if x != master_node] + if len(failover_targets) == 0: + nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion) + for n in nodes.items: + if n.metadata.name != master_node: + failover_targets.append(n.metadata.name) + + return failover_targets + + def assert_failover(self, current_master_node, num_replicas, failover_targets, cluster_label): + ''' + Check if master is failing over. 
The replica should move first to be the switchover target + ''' + k8s = self.k8s + k8s.wait_for_pod_failover(failover_targets, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + new_master_node, new_replica_nodes = k8s.get_pg_nodes(cluster_label) + self.assertNotEqual(current_master_node, new_master_node, + "Master on {} did not fail over to one of {}".format(current_master_node, failover_targets)) + self.assertEqual(num_replicas, len(new_replica_nodes), + "Expected {} replicas, found {}".format(num_replicas, len(new_replica_nodes))) + self.assert_master_is_unique() + + return new_master_node, new_replica_nodes + def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"): ''' Check that there is a single pod in the k8s cluster with the label "spilo-role=master" To be called manually after operations that affect pods ''' - k8s = self.k8s labels = 'spilo-role=master,cluster-name=' + clusterName num_of_master_pods = k8s.count_pods_with_label(labels, namespace) self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods)) + def assert_distributed_pods(self, master_node, replica_nodes, cluster_label): + ''' + Other tests can lead to the situation that master and replica are on the same node. + Toggle pod anti affinty to distribute pods accross nodes (replica in particular). + ''' + k8s = self.k8s + failover_targets = self.get_failover_targets(master_node, replica_nodes) -class K8sApi: - - def __init__(self): - - # https://github.com/kubernetes-client/python/issues/309 - warnings.simplefilter("ignore", ResourceWarning) - - self.config = config.load_kube_config() - self.k8s_client = client.ApiClient() - - self.core_v1 = client.CoreV1Api() - self.apps_v1 = client.AppsV1Api() - self.batch_v1_beta1 = client.BatchV1beta1Api() - self.custom_objects_api = client.CustomObjectsApi() - - -class K8s: - ''' - Wraps around K8 api client and helper methods. - ''' - - RETRY_TIMEOUT_SEC = 5 - - def __init__(self): - self.api = K8sApi() - - def get_pg_nodes(self, pg_cluster_name, namespace='default'): - master_pod_node = '' - replica_pod_nodes = [] - podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) - for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': - master_pod_node = pod.spec.node_name - elif pod.metadata.labels.get('spilo-role') == 'replica': - replica_pod_nodes.append(pod.spec.node_name) - - return master_pod_node, replica_pod_nodes - - def wait_for_operator_pod_start(self): - self. 
wait_for_pod_start("name=postgres-operator") - # HACK operator must register CRD / add existing PG clusters after pod start up - # for local execution ~ 10 seconds suffices - time.sleep(60) - - def wait_for_pod_start(self, pod_labels, namespace='default'): - pod_phase = 'No pod running' - while pod_phase != 'Running': - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items - if pods: - pod_phase = pods[0].status.phase - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_service_type(self, svc_labels, namespace='default'): - svc_type = '' - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - svc_type = svc.spec.type - return svc_type - - def check_service_annotations(self, svc_labels, annotations, namespace='default'): - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - if len(svc.metadata.annotations) != len(annotations): - return False - for key in svc.metadata.annotations: - if svc.metadata.annotations[key] != annotations[key]: - return False - return True - - def wait_for_pg_to_scale(self, number_of_instances, namespace='default'): - - body = { - "spec": { - "numberOfInstances": number_of_instances + # enable pod anti affintiy in config map which should trigger movement of replica + patch_enable_antiaffinity = { + "data": { + "enable_pod_antiaffinity": "true" } } - _ = self.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) + k8s.update_config(patch_enable_antiaffinity, "enable antiaffinity") + self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label) - labels = 'cluster-name=acid-minimal-cluster' - while self.count_pods_with_label(labels) != number_of_instances: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def count_pods_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) - - def wait_for_master_failover(self, expected_master_nodes, namespace='default'): - pod_phase = 'Failing over' - new_master_node = '' - labels = 'spilo-role=master,cluster-name=acid-minimal-cluster' - - while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes): - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - if pods: - new_master_node = pods[0].spec.node_name - pod_phase = pods[0].status.phase - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_logical_backup_job(self, namespace='default'): - return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") - - def wait_for_logical_backup_job(self, expected_num_of_jobs): - while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_logical_backup_job_deletion(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=0) - - def wait_for_logical_backup_job_creation(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=1) - - def update_config(self, config_map_patch): - self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) - - operator_pod = self.api.core_v1.list_namespaced_pod( - 'default', label_selector="name=postgres-operator").items[0].metadata.name - self.api.core_v1.delete_namespaced_pod(operator_pod, "default") # restart reloads the conf - self.wait_for_operator_pod_start() - - def 
create_with_kubectl(self, path): - subprocess.run(["kubectl", "create", "-f", path]) + # now disable pod anti affintiy again which will cause yet another failover + patch_disable_antiaffinity = { + "data": { + "enable_pod_antiaffinity": "false" + } + } + k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity") + k8s.wait_for_pod_start('spilo-role=master') + k8s.wait_for_pod_start('spilo-role=replica') + return True if __name__ == '__main__': diff --git a/go.mod b/go.mod index 36686dcf6..bbe5140b7 100644 --- a/go.mod +++ b/go.mod @@ -1,25 +1,22 @@ module github.com/zalando/postgres-operator -go 1.12 +go 1.15 require ( - github.com/aws/aws-sdk-go v1.25.44 - github.com/emicklei/go-restful v2.9.6+incompatible // indirect - github.com/evanphx/json-patch v4.5.0+incompatible // indirect - github.com/googleapis/gnostic v0.3.0 // indirect - github.com/imdario/mergo v0.3.8 // indirect - github.com/lib/pq v1.2.0 + github.com/aws/aws-sdk-go v1.36.29 + github.com/golang/mock v1.4.4 + github.com/lib/pq v1.9.0 github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d - github.com/sirupsen/logrus v1.4.2 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect - golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect - golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect - golang.org/x/tools v0.0.0-20191209225234-22774f7dae43 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/yaml.v2 v2.2.4 - k8s.io/api v0.0.0-20191121015604-11707872ac1c - k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5 - k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d - k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2 - k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e + github.com/r3labs/diff v1.1.0 + github.com/sirupsen/logrus v1.7.0 + github.com/stretchr/testify v1.6.1 + golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c + golang.org/x/mod v0.4.0 // indirect + golang.org/x/tools v0.0.0-20201207204333-a835c872fcea // indirect + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.19.4 + k8s.io/apiextensions-apiserver v0.19.3 + k8s.io/apimachinery v0.19.4 + k8s.io/client-go v0.19.3 + k8s.io/code-generator v0.19.4 ) diff --git a/go.sum b/go.sum index f85dd060f..64434e2e0 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,33 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod 
h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -22,68 +37,93 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.25.44 h1:n9ahFoiyn66smjF34hYr3tb6/ZdBcLuFz7BCDhHyJ7I= 
-github.com/aws/aws-sdk-go v1.25.44/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.36.29 h1:lM1G3AF1+7vzFm0n7hfH8r2+750BTo+6Lo6FtPB7kzk= +github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= @@ -101,9 +141,11 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod 
h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -114,6 +156,7 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -123,45 +166,67 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod 
h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= 
-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -169,34 +234,38 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -209,26 +278,31 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -236,85 +310,136 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib 
v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= +github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c h1:9HhBz5L/UjnK9XLtiZhYAdue5BVKep3PMmS2LuPDt8k= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -326,92 +451,159 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191209225234-22774f7dae43 h1:NfPq5mgc5ArFgVLCpeS4z07IoxSAqVfV/gQ5vxdgaxI= -golang.org/x/tools v0.0.0-20191209225234-22774f7dae43/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201207204333-a835c872fcea h1:LgKM3cNs8xO6GK1ZVK0nasPn7IN39Sz9EBTwQLyishk= +golang.org/x/tools v0.0.0-20201207204333-a835c872fcea/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -420,47 +612,51 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20191121015604-11707872ac1c h1:Z87my3sF4WhG0OMxzARkWY/IKBtOr+MhXZAb4ts6qFc= -k8s.io/api 
v0.0.0-20191121015604-11707872ac1c/go.mod h1:R/s4gKT0V/cWEnbQa9taNRJNbWUK57/Dx6cPj6MD3A0= -k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5 h1:g+GvnbGqLU1Jxb/9iFm/BFcmkqG9HdsGh52+wHirpsM= -k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5/go.mod h1:CPw0IHz1YrWGy0+8mG/76oTHXvChlgCb3EAezKQKB2I= -k8s.io/apimachinery v0.0.0-20191121015412-41065c7a8c2a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.0.0-20191128180518-03184f823e28/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d h1:q+OZmYewHJeMCzwpHkXlNTtk5bvaUMPCikKvf77RBlo= -k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apiserver v0.0.0-20191204084332-137a9d3b886b/go.mod h1:itgfam5HJbT/4b2BGfpUkkxfheMmDH+Ix+tEAP3uqZk= -k8s.io/client-go v0.0.0-20191204082517-8c19b9f4a642/go.mod h1:HMVIZ0dPop3WCrPEaJ+v5/94cjt56avdDFshpX0Fjvo= -k8s.io/client-go v0.0.0-20191204082519-e9644b2e3edc/go.mod h1:5lSG1yeDZVwDYAHe9VK48SCe5zmcnkAcf2Mx59TuhmM= -k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2 h1:T2HGghBOPAOEjWuIyFSeCsWEwsxa6unkBvy3PHfqonM= -k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2/go.mod h1:5lSG1yeDZVwDYAHe9VK48SCe5zmcnkAcf2Mx59TuhmM= -k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e h1:HB9Zu5ZUvJfNpLiTPhz+CebVKV8C39qTBMQkAgAZLNw= -k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/component-base v0.0.0-20191204083903-0d4d24e738e4/go.mod h1:8VIh1jErItC4bg9hLBkPneyS77Tin8KwSzbYepHJnQI= -k8s.io/component-base v0.0.0-20191204083906-3ac1376c73aa/go.mod h1:mECWvHCPhJudDVDMtBl+AIf/YnTMp5r1F947OYFUwP0= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= 
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apiextensions-apiserver v0.19.3 h1:WZxBypSHW4SdXHbdPTS/Jy7L2la6Niggs8BuU5o+avo= +k8s.io/apiextensions-apiserver v0.19.3/go.mod h1:igVEkrE9TzInc1tYE7qSqxaLg/rEAp6B5+k9Q7+IC8Q= +k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.19.3/go.mod h1:bx6dMm+H6ifgKFpCQT/SAhPwhzoeIMlHIaibomUDec0= +k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg= +k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= +k8s.io/code-generator v0.19.3/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.19.4 h1:c8IL7RgTgJaYgr2bYMgjN0WikHnohbBhEgajfIkuP5I= +k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.3/go.mod h1:WhLWSIefQn8W8jxSLl5WNiR6z8oyMe/8Zywg7alOkRc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/kubectl-pg/cmd/check.go b/kubectl-pg/cmd/check.go index 266047cf0..4f88e7efa 100644 --- a/kubectl-pg/cmd/check.go +++ b/kubectl-pg/cmd/check.go @@ -24,19 +24,20 @@ package cmd import ( "fmt" + "log" + "github.com/spf13/cobra" postgresConstants "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextbeta1 
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "log" ) // checkCmd represent kubectl pg check. var checkCmd = &cobra.Command{ Use: "check", Short: "Checks the Postgres operator is installed in the k8s cluster", - Long: `Checks that the Postgres CRD is registered in a k8s cluster. + Long: `Checks that the Postgres CRD is registered in a k8s cluster. This means that the operator pod was able to start normally.`, Run: func(cmd *cobra.Command, args []string) { check() @@ -47,9 +48,9 @@ kubectl pg check } // check validates postgresql CRD registered or not. -func check() *v1beta1.CustomResourceDefinition { +func check() *v1.CustomResourceDefinition { config := getConfig() - apiExtClient, err := apiextbeta1.NewForConfig(config) + apiExtClient, err := apiextv1.NewForConfig(config) if err != nil { log.Fatal(err) } diff --git a/kubectl-pg/cmd/list.go b/kubectl-pg/cmd/list.go index df827ffaf..f4dea882d 100644 --- a/kubectl-pg/cmd/list.go +++ b/kubectl-pg/cmd/list.go @@ -24,13 +24,14 @@ package cmd import ( "fmt" - "github.com/spf13/cobra" - "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "log" "strconv" "time" + + "github.com/spf13/cobra" + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -95,8 +96,12 @@ func listAll(listPostgres *v1.PostgresqlList) { template := "%-32s%-16s%-12s%-12s%-12s%-12s%-12s\n" fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME", "NAMESPACE") for _, pgObjs := range listPostgres.Items { - fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)), - pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size, pgObjs.Namespace) + fmt.Printf(template, pgObjs.Name, + pgObjs.Status.PostgresClusterStatus, + strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)), + pgObjs.Spec.PostgresqlParam.PgVersion, + time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), + pgObjs.Spec.Size, pgObjs.Namespace) } } @@ -104,8 +109,12 @@ func listWithNamespace(listPostgres *v1.PostgresqlList) { template := "%-32s%-16s%-12s%-12s%-12s%-12s\n" fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME") for _, pgObjs := range listPostgres.Items { - fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)), - pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size) + fmt.Printf(template, pgObjs.Name, + pgObjs.Status.PostgresClusterStatus, + strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)), + pgObjs.Spec.PostgresqlParam.PgVersion, + time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), + pgObjs.Spec.Size) } } diff --git a/kubectl-pg/cmd/util.go b/kubectl-pg/cmd/util.go index 329f9a28f..a2a5c2073 100644 --- a/kubectl-pg/cmd/util.go +++ b/kubectl-pg/cmd/util.go @@ -25,6 +25,13 @@ 
package cmd import ( "flag" "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,12 +39,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/homedir" - "log" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" ) const ( @@ -88,7 +89,7 @@ func confirmAction(clusterName string, namespace string) { } clusterDetails := strings.Split(confirmClusterDetails, "/") if clusterDetails[0] != namespace || clusterDetails[1] != clusterName { - fmt.Printf("cluster name or namespace doesn't match. Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName) + fmt.Printf("cluster name or namespace does not match. Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName) } else { return } diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 5ae817ca3..9f2d19639 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -4,12 +4,13 @@ metadata: name: acid-test-cluster # labels: # environment: demo +# annotations: +# "acid.zalan.do/controller": "second-operator" +# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured +# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured spec: - dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 + dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 teamId: "acid" - volume: - size: 1Gi -# storageClass: my-sc numberOfInstances: 2 users: # Application/Robot users zalando: @@ -17,18 +18,61 @@ spec: - createdb enableMasterLoadBalancer: false enableReplicaLoadBalancer: false + enableConnectionPooler: false # enable/disable connection pooler deployment + enableReplicaConnectionPooler: false # set to enable connectionPooler for replica service allowedSourceRanges: # load balancers' source ranges for both master and replica services - 127.0.0.1/32 databases: foo: zalando + preparedDatabases: + bar: + defaultUsers: true + extensions: + pg_partman: public + pgcrypto: public + schemas: + data: {} + history: + defaultRoles: true + defaultUsers: false postgresql: - version: "11" - parameters: # Expert section + version: "13" + parameters: # Expert section shared_buffers: "32MB" max_connections: "10" log_statement: "all" + volume: + size: 1Gi +# storageClass: my-sc +# iops: 1000 # for EBS gp3 +# throughput: 250 # in MB/s for EBS gp3 + additionalVolumes: + - name: empty + mountPath: /opt/empty + targetContainers: + - all + volumeSource: + emptyDir: {} +# - name: data +# mountPath: /home/postgres/pgdata/partitions +# targetContainers: +# - postgres +# volumeSource: +# PersistentVolumeClaim: +# claimName: pvc-postgresql-data-partitions +# readyOnly: false +# - name: conf +# mountPath: /etc/telegraf +# subPath: telegraf.conf +# targetContainers: +# - telegraf-sidecar +# volumeSource: +# configMap: +# name: my-config-map enableShmVolume: true +# spiloRunAsUser: 101 +# spiloRunAsGroup: 103 # spiloFSGroup: 103 # podAnnotations: # annotation.key: value @@ -51,9 +95,9 @@ spec: encoding: "UTF8" locale: "en_US.UTF-8" data-checksums: "true" - pg_hba: - - hostssl all all 0.0.0.0/0 md5 - - host all all 0.0.0.0/0 md5 +# pg_hba: +# - hostssl all all 0.0.0.0/0 
md5 +# - host all all 0.0.0.0/0 md5 # slots: # permanent_physical_1: # type: physical @@ -64,6 +108,8 @@ spec: ttl: 30 loop_wait: &loop_wait 10 retry_timeout: 10 + synchronous_mode: false + synchronous_mode_strict: false maximum_lag_on_failover: 33554432 # restore a Postgres DB with point-in-time-recovery @@ -83,6 +129,20 @@ spec: # - 01:00-06:00 #UTC # - Sat:00:00-04:00 +# overwrite custom properties for connection pooler deployments +# connectionPooler: +# numberOfInstances: 2 +# mode: "transaction" +# schema: "pooler" +# user: "pooler" +# resources: +# requests: +# cpu: 300m +# memory: 100Mi +# limits: +# cpu: "1" +# memory: 100Mi + initContainers: - name: date image: busybox @@ -100,3 +160,28 @@ spec: # env: # - name: "USEFUL_VAR" # value: "perhaps-true" + +# Custom TLS certificate. Disabled unless tls.secretName has a value. + tls: + secretName: "" # should correspond to a Kubernetes Secret resource to load + certificateFile: "tls.crt" + privateKeyFile: "tls.key" + caFile: "" # optionally configure Postgres with a CA certificate + caSecretName: "" # optionally the ca.crt can come from this secret instead. +# file names can be also defined with absolute path, and will no longer be relative +# to the "/tls/" path where the secret is being mounted by default, and "/tlsca/" +# where the caSecret is mounted by default. +# When TLS is enabled, also set spiloFSGroup parameter above to the relevant value. +# if unknown, set it to 103 which is the usual value in the default spilo images. +# In Openshift, there is no need to set spiloFSGroup/spilo_fsgroup. + +# Add node affinity support by allowing postgres pods to schedule only on nodes that +# have label: "postgres-operator:enabled" set. +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: postgres-operator +# operator: In +# values: +# - enabled diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index f85e9e052..5a25cba5b 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -11,6 +11,16 @@ data: cluster_history_entries: "1000" cluster_labels: application:spilo cluster_name_label: cluster-name + # connection_pooler_default_cpu_limit: "1" + # connection_pooler_default_cpu_request: "500m" + # connection_pooler_default_memory_limit: 100Mi + # connection_pooler_default_memory_request: 100Mi + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-12" + # connection_pooler_max_db_connections: 60 + # connection_pooler_mode: "transaction" + # connection_pooler_number_of_instances: 2 + # connection_pooler_schema: "pooler" + # connection_pooler_user: "pooler" # custom_service_annotations: "keyx:valuez,keya:valuea" # custom_pod_annotations: "keya:valuea,keyb:valueb" db_hosted_zone: db.example.com @@ -19,34 +29,52 @@ data: # default_cpu_request: 100m # default_memory_limit: 500Mi # default_memory_request: 100Mi - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 + # downscaler_annotations: "deployment-time,downscaler/*" # enable_admin_role_for_users: "true" # enable_crd_validation: "true" # enable_database_access: "true" + enable_ebs_gp3_migration: "false" + # enable_ebs_gp3_migration_max_size: "1000" # enable_init_containers: "true" + # enable_lazy_spilo_upgrade: "false" enable_master_load_balancer: "false" + enable_pgversion_env_var: "true" # 
enable_pod_antiaffinity: "false" # enable_pod_disruption_budget: "true" + # enable_postgres_team_crd: "false" + # enable_postgres_team_crd_superusers: "false" enable_replica_load_balancer: "false" # enable_shm_volume: "true" # enable_sidecars: "true" + enable_spilo_wal_path_compat: "true" # enable_team_superuser: "false" enable_teams_api: "false" # etcd_host: "" - # infrastructure_roles_secret_name: postgresql-infrastructure-roles + external_traffic_policy: "Cluster" + # gcp_credentials: "" + # kubernetes_use_configmaps: "false" + # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole" + # inherited_annotations: owned-by # inherited_labels: application,environment # kube_iam_role: "" # log_s3_bucket: "" - # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + # logical_backup_google_application_credentials: "" + logical_backup_job_prefix: "logical-backup-" + logical_backup_provider: "s3" # logical_backup_s3_access_key_id: "" - # logical_backup_s3_bucket: "my-bucket-url" + logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_region: "" # logical_backup_s3_endpoint: "" # logical_backup_s3_secret_access_key: "" - # logical_backup_s3_sse: "AES256" - # logical_backup_schedule: "30 00 * * *" + logical_backup_s3_sse: "AES256" + logical_backup_schedule: "30 00 * * *" master_dns_name_format: "{cluster}.{team}.{hostedzone}" - # master_pod_move_timeout: 10m + # master_pod_move_timeout: 20m # max_instances: "-1" # min_instances: "-1" # min_cpu_limit: 250m @@ -59,9 +87,11 @@ data: pdb_name_format: "postgres-{cluster}-pdb" # pod_antiaffinity_topology_key: "kubernetes.io/hostname" pod_deletion_wait_timeout: 10m - # pod_environment_configmap: "" + # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" pod_label_wait_timeout: 10m pod_management_policy: "ordered_ready" + # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role # pod_service_account_definition: "" pod_service_account_name: "postgres-pod" @@ -82,12 +112,17 @@ data: secret_name_template: "{username}.{cluster}.credentials" # sidecar_docker_images: "" # set_memory_request_to_limit: "false" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 + # spilo_fsgroup: 103 spilo_privileged: "false" + storage_resize_mode: "pvc" super_username: postgres # team_admin_role: "admin" # team_api_role_configuration: "log_statement:all" # teams_api_url: http://fake-teams-api.default.svc.cluster.local # toleration: "" + # wal_gs_bucket: "" # wal_s3_bucket: "" watched_namespace: "*" # listen to all namespaces - workers: "4" + workers: "16" diff --git a/manifests/custom-team-membership.yaml b/manifests/custom-team-membership.yaml new file mode 100644 index 000000000..9af153962 --- /dev/null +++ b/manifests/custom-team-membership.yaml @@ -0,0 +1,13 @@ +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalSuperuserTeams: + acid: + - "postgres_superusers" + additionalTeams: + acid: [] + additionalMembers: + acid: + - "elephant" diff --git a/manifests/e2e-storage-class.yaml b/manifests/e2e-storage-class.yaml new file mode 100644 index 000000000..c8d941341 --- /dev/null +++ b/manifests/e2e-storage-class.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + namespace: 
kube-system + name: standard + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/host-path diff --git a/manifests/fake-teams-api.yaml b/manifests/fake-teams-api.yaml index 97d1b2a98..15f7c7576 100644 --- a/manifests/fake-teams-api.yaml +++ b/manifests/fake-teams-api.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: fake-teams-api diff --git a/manifests/infrastructure-roles-new.yaml b/manifests/infrastructure-roles-new.yaml new file mode 100644 index 000000000..64b854c6a --- /dev/null +++ b/manifests/infrastructure-roles-new.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +data: + # infrastructure role definition in the new format + # robot_zmon_acid_monitoring_new + user: cm9ib3Rfem1vbl9hY2lkX21vbml0b3JpbmdfbmV3 + # foobar_new + password: Zm9vYmFyX25ldw== +kind: Secret +metadata: + name: postgresql-infrastructure-roles-new + namespace: default +type: Opaque diff --git a/manifests/infrastructure-roles.yaml b/manifests/infrastructure-roles.yaml index 3c2d86850..c66d79139 100644 --- a/manifests/infrastructure-roles.yaml +++ b/manifests/infrastructure-roles.yaml @@ -7,12 +7,14 @@ data: # provide other options in the configmap. # robot_zmon_acid_monitoring user1: cm9ib3Rfem1vbl9hY2lkX21vbml0b3Jpbmc= + # foobar + password1: Zm9vYmFy # robot_zmon inrole1: cm9ib3Rfem1vbg== # testuser user2: dGVzdHVzZXI= - # foobar - password2: Zm9vYmFy + # testpassword + password2: dGVzdHBhc3N3b3Jk # user batman with the password justice # look for other fields in the infrastructure roles configmap batman: anVzdGljZQ== diff --git a/manifests/minimal-fake-pooler-deployment.yaml b/manifests/minimal-fake-pooler-deployment.yaml new file mode 100644 index 000000000..5ee8cf05f --- /dev/null +++ b/manifests/minimal-fake-pooler-deployment.yaml @@ -0,0 +1,35 @@ +# will not run but is good enough for tests to fail +apiVersion: apps/v1 +kind: Deployment +metadata: + name: acid-minimal-cluster-pooler + labels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler +spec: + replicas: 1 + selector: + matchLabels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler + cluster-name: acid-minimal-cluster + template: + metadata: + labels: + application: db-connection-pooler + connection-pooler: acid-minimal-cluster-pooler + cluster-name: acid-minimal-cluster + spec: + serviceAccountName: postgres-operator + containers: + - name: postgres-operator + image: registry.opensource.zalan.do/acid/pgbouncer:master-12 + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 100m + memory: 250Mi + limits: + cpu: 500m + memory: 500Mi + env: [] diff --git a/manifests/minimal-postgres-manifest.yaml b/manifests/minimal-postgres-manifest.yaml index 75dfdf07f..ff96e392b 100644 --- a/manifests/minimal-postgres-manifest.yaml +++ b/manifests/minimal-postgres-manifest.yaml @@ -15,5 +15,7 @@ spec: foo_user: [] # role for application foo databases: foo: zalando # dbname: owner + preparedDatabases: + bar: {} postgresql: - version: "11" + version: "13" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 773bd2514..6e69d6d11 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -26,6 +26,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update 
CRDs when starting up - apiGroups: - apiextensions.k8s.io @@ -43,6 +52,18 @@ rules: - configmaps verbs: - get +# to send events to the CRs +- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - update + - watch # to manage endpoints which are also used by Patroni - apiGroups: - "" @@ -85,6 +106,8 @@ rules: - delete - get - list + - patch + - update # to read existing PVs. Creation should be done via dynamic provisioning - apiGroups: - "" @@ -129,6 +152,7 @@ rules: - apps resources: - statefulsets + - deployments verbs: - create - delete @@ -180,15 +204,15 @@ rules: verbs: - get - create -# to grant privilege to run privileged pods -- apiGroups: - - extensions - resources: - - podsecuritypolicies - resourceNames: - - privileged - verbs: - - use +# to grant privilege to run privileged pods (not needed by default) +#- apiGroups: +# - extensions +# resources: +# - podsecuritypolicies +# resourceNames: +# - privileged +# verbs: +# - use --- apiVersion: rbac.authorization.k8s.io/v1 @@ -203,3 +227,51 @@ subjects: - kind: ServiceAccount name: postgres-operator namespace: default + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: postgres-pod +rules: +# Patroni needs to watch and manage endpoints +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +# Patroni needs to watch pods +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +# to let Patroni create a headless service +- apiGroups: + - "" + resources: + - services + verbs: + - create +# to grant privilege to run privileged pods (not needed by default) +#- apiGroups: +# - extensions +# resources: +# - podsecuritypolicies +# resourceNames: +# - privileged +# verbs: +# - use diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index c4bc8d458..60ceb56de 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: operatorconfigurations.acid.zalan.do @@ -11,292 +11,532 @@ spec: singular: operatorconfiguration shortNames: - opconfig + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - configuration - properties: - kind: - type: string - enum: - - OperatorConfiguration - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - configuration: - type: object - properties: - docker_image: - type: string - enable_crd_validation: - type: boolean - enable_shm_volume: - type: boolean - etcd_host: - type: string - max_instances: - type: integer - minimum: -1 # -1 = disabled - min_instances: - type: integer - minimum: -1 # -1 = disabled - resync_period: - type: string - repair_period: - type: string - set_memory_request_to_limit: - type: boolean - sidecar_docker_images: - type: object - additionalProperties: - type: string - workers: - type: integer - minimum: 1 - users: - type: object - properties: - replication_username: - type: string - super_username: - type: string - kubernetes: - type: object - properties: - cluster_domain: - type: string - cluster_labels: - type: object - additionalProperties: - type: string - cluster_name_label: - type: string - custom_pod_annotations: - type: object - 
additionalProperties: - type: string - enable_init_containers: - type: boolean - enable_pod_antiaffinity: - type: boolean - enable_pod_disruption_budget: - type: boolean - enable_sidecars: - type: boolean - infrastructure_roles_secret_name: - type: string - inherited_labels: - type: array - items: - type: string - master_pod_move_timeout: - type: string - node_readiness_label: - type: object - additionalProperties: - type: string - oauth_token_secret_name: - type: string - pdb_name_format: - type: string - pod_antiaffinity_topology_key: - type: string - pod_environment_configmap: - type: string - pod_management_policy: - type: string - enum: - - "ordered_ready" - - "parallel" - pod_priority_class_name: - type: string - pod_role_label: - type: string - pod_service_account_definition: - type: string - pod_service_account_name: - type: string - pod_service_account_role_definition: - type: string - pod_service_account_role_binding_definition: - type: string - pod_terminate_grace_period: - type: string - secret_name_template: - type: string - spilo_fsgroup: - type: integer - spilo_privileged: - type: boolean - toleration: - type: object - additionalProperties: - type: string - watched_namespace: - type: string - postgres_pod_resources: - type: object - properties: - default_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - min_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - min_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - timeouts: - type: object - properties: - pod_label_wait_timeout: - type: string - pod_deletion_wait_timeout: - type: string - ready_wait_interval: - type: string - ready_wait_timeout: - type: string - resource_check_interval: - type: string - resource_check_timeout: - type: string - load_balancer: - type: object - properties: - custom_service_annotations: - type: object - additionalProperties: - type: string - db_hosted_zone: - type: string - enable_master_load_balancer: - type: boolean - enable_replica_load_balancer: - type: boolean - master_dns_name_format: - type: string - replica_dns_name_format: - type: string - aws_or_gcp: - type: object - properties: - additional_secret_mount: - type: string - additional_secret_mount_path: - type: string - aws_region: - type: string - kube_iam_role: - type: string - log_s3_bucket: - type: string - wal_s3_bucket: - type: string - logical_backup: - type: object - properties: - logical_backup_docker_image: - type: string - logical_backup_s3_access_key_id: - type: string - logical_backup_s3_bucket: - type: string - logical_backup_s3_endpoint: - type: string - logical_backup_s3_region: - type: string - logical_backup_s3_secret_access_key: - type: string - logical_backup_s3_sse: - type: string - logical_backup_schedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - debug: - type: object - properties: - debug_logging: - type: boolean - enable_database_access: - type: boolean - teams_api: - type: object - properties: - enable_admin_role_for_users: - type: boolean - enable_team_superuser: - type: boolean - enable_teams_api: - type: boolean - pam_configuration: - type: string - pam_role_name: - type: string - postgres_superuser_teams: - type: array - items: - type: string - 
protected_role_names: - type: array - items: - type: string - team_admin_role: - type: string - team_api_role_configuration: - type: object - additionalProperties: - type: string - teams_api_url: - type: string - logging_rest_api: - type: object - properties: - api_port: - type: integer - cluster_history_entries: - type: integer - ring_log_lines: - type: integer - scalyr: - type: object - properties: - scalyr_api_key: - type: string - scalyr_cpu_limit: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_cpu_request: - type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - scalyr_image: - type: string - scalyr_memory_limit: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_memory_request: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - scalyr_server_url: - type: string - status: - type: object - additionalProperties: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: Spilo image to be used for Pods + jsonPath: .configuration.docker_image + - name: Cluster-Label + type: string + description: Label for K8s resources created by operator + jsonPath: .configuration.kubernetes.cluster_name_label + - name: Service-Account + type: string + description: Name of service account to be used + jsonPath: .configuration.kubernetes.pod_service_account_name + - name: Min-Instances + type: integer + description: Minimum number of instances per Postgres cluster + jsonPath: .configuration.min_instances + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - configuration + properties: + kind: type: string + enum: + - OperatorConfiguration + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + configuration: + type: object + properties: + docker_image: + type: string + default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p2" + enable_crd_validation: + type: boolean + default: true + enable_lazy_spilo_upgrade: + type: boolean + default: false + enable_pgversion_env_var: + type: boolean + default: true + enable_shm_volume: + type: boolean + default: true + enable_spilo_wal_path_compat: + type: boolean + default: false + etcd_host: + type: string + default: "" + kubernetes_use_configmaps: + type: boolean + default: false + max_instances: + type: integer + minimum: -1 # -1 = disabled + default: -1 + min_instances: + type: integer + minimum: -1 # -1 = disabled + default: -1 + resync_period: + type: string + default: "30m" + repair_period: + type: string + default: "5m" + set_memory_request_to_limit: + type: boolean + default: false + sidecar_docker_images: + type: object + additionalProperties: + type: string + sidecars: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + workers: + type: integer + minimum: 1 + default: 8 + users: + type: object + properties: + replication_username: + type: string + default: standby + super_username: + type: string + default: postgres + kubernetes: + type: object + properties: + cluster_domain: + type: string + default: "cluster.local" + cluster_labels: + type: object + additionalProperties: + type: string + default: + application: spilo + cluster_name_label: + type: string + default: "cluster-name" + custom_pod_annotations: + type: object + additionalProperties: + type: string + delete_annotation_date_key: + type: string + delete_annotation_name_key: + type: 
string + downscaler_annotations: + type: array + items: + type: string + enable_init_containers: + type: boolean + default: true + enable_pod_antiaffinity: + type: boolean + default: false + enable_pod_disruption_budget: + type: boolean + default: true + enable_sidecars: + type: boolean + default: true + infrastructure_roles_secret_name: + type: string + infrastructure_roles_secrets: + type: array + nullable: true + items: + type: object + required: + - secretname + - userkey + - passwordkey + properties: + secretname: + type: string + userkey: + type: string + passwordkey: + type: string + rolekey: + type: string + defaultuservalue: + type: string + defaultrolevalue: + type: string + details: + type: string + template: + type: boolean + inherited_annotations: + type: array + items: + type: string + inherited_labels: + type: array + items: + type: string + master_pod_move_timeout: + type: string + default: "20m" + node_readiness_label: + type: object + additionalProperties: + type: string + oauth_token_secret_name: + type: string + default: "postgresql-operator" + pdb_name_format: + type: string + default: "postgres-{cluster}-pdb" + pod_antiaffinity_topology_key: + type: string + default: "kubernetes.io/hostname" + pod_environment_configmap: + type: string + pod_environment_secret: + type: string + pod_management_policy: + type: string + enum: + - "ordered_ready" + - "parallel" + default: "ordered_ready" + pod_priority_class_name: + type: string + pod_role_label: + type: string + default: "spilo-role" + pod_service_account_definition: + type: string + default: "" + pod_service_account_name: + type: string + default: "postgres-pod" + pod_service_account_role_definition: + type: string + default: "" + pod_service_account_role_binding_definition: + type: string + default: "" + pod_terminate_grace_period: + type: string + default: "5m" + secret_name_template: + type: string + default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + spilo_runasuser: + type: integer + spilo_runasgroup: + type: integer + spilo_fsgroup: + type: integer + spilo_privileged: + type: boolean + default: false + storage_resize_mode: + type: string + enum: + - "ebs" + - "pvc" + - "off" + default: "pvc" + toleration: + type: object + additionalProperties: + type: string + watched_namespace: + type: string + postgres_pod_resources: + type: object + properties: + default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "100m" + default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "500Mi" + default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + min_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "250m" + min_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "250Mi" + timeouts: + type: object + properties: + pod_label_wait_timeout: + type: string + default: "10m" + pod_deletion_wait_timeout: + type: string + default: "10m" + ready_wait_interval: + type: string + default: "4s" + ready_wait_timeout: + type: string + default: "30s" + resource_check_interval: + type: string + default: "3s" + resource_check_timeout: + type: string + default: "10m" + load_balancer: + type: object + properties: + custom_service_annotations: + type: object + additionalProperties: + type: string + db_hosted_zone: + type: string + default: 
"db.example.com" + enable_master_load_balancer: + type: boolean + default: true + enable_replica_load_balancer: + type: boolean + default: false + external_traffic_policy: + type: string + enum: + - "Cluster" + - "Local" + default: "Cluster" + master_dns_name_format: + type: string + default: "{cluster}.{team}.{hostedzone}" + replica_dns_name_format: + type: string + default: "{cluster}-repl.{team}.{hostedzone}" + aws_or_gcp: + type: object + properties: + additional_secret_mount: + type: string + additional_secret_mount_path: + type: string + default: "/meta/credentials" + aws_region: + type: string + default: "eu-central-1" + enable_ebs_gp3_migration: + type: boolean + default: false + enable_ebs_gp3_migration_max_size: + type: integer + default: 1000 + gcp_credentials: + type: string + kube_iam_role: + type: string + log_s3_bucket: + type: string + wal_gs_bucket: + type: string + wal_s3_bucket: + type: string + logical_backup: + type: object + properties: + logical_backup_docker_image: + type: string + default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + logical_backup_google_application_credentials: + type: string + logical_backup_job_prefix: + type: string + default: "logical-backup-" + logical_backup_provider: + type: string + default: "s3" + logical_backup_s3_access_key_id: + type: string + logical_backup_s3_bucket: + type: string + logical_backup_s3_endpoint: + type: string + logical_backup_s3_region: + type: string + logical_backup_s3_secret_access_key: + type: string + logical_backup_s3_sse: + type: string + logical_backup_schedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + default: "30 00 * * *" + debug: + type: object + properties: + debug_logging: + type: boolean + default: true + enable_database_access: + type: boolean + default: true + teams_api: + type: object + properties: + enable_admin_role_for_users: + type: boolean + default: true + enable_postgres_team_crd: + type: boolean + default: true + enable_postgres_team_crd_superusers: + type: boolean + default: false + enable_team_superuser: + type: boolean + default: false + enable_teams_api: + type: boolean + default: true + pam_configuration: + type: string + default: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees" + pam_role_name: + type: string + default: "zalandos" + postgres_superuser_teams: + type: array + items: + type: string + protected_role_names: + type: array + items: + type: string + default: + - admin + team_admin_role: + type: string + default: "admin" + team_api_role_configuration: + type: object + additionalProperties: + type: string + default: + log_statement: all + teams_api_url: + type: string + default: "https://teams.example.com/api/" + logging_rest_api: + type: object + properties: + api_port: + type: integer + default: 8080 + cluster_history_entries: + type: integer + default: 1000 + ring_log_lines: + type: integer + default: 100 + scalyr: # deprecated + type: object + properties: + scalyr_api_key: + type: string + scalyr_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + scalyr_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "100m" + scalyr_image: + type: string + scalyr_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "500Mi" + scalyr_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "50Mi" + scalyr_server_url: + type: string + default: "https://upload.eu.scalyr.com" + 
connection_pooler: + type: object + properties: + connection_pooler_schema: + type: string + default: "pooler" + connection_pooler_user: + type: string + default: "pooler" + connection_pooler_image: + type: string + default: "registry.opensource.zalan.do/acid/pgbouncer:master-12" + connection_pooler_max_db_connections: + type: integer + default: 60 + connection_pooler_mode: + type: string + enum: + - "session" + - "transaction" + default: "transaction" + connection_pooler_number_of_instances: + type: integer + minimum: 1 + default: 2 + connection_pooler_default_cpu_limit: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "1" + connection_pooler_default_cpu_request: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + default: "500m" + connection_pooler_default_memory_limit: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + connection_pooler_default_memory_request: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + default: "100Mi" + status: + type: object + additionalProperties: + type: string diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index 63f17d9fa..a03959805 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -2,8 +2,12 @@ apiVersion: apps/v1 kind: Deployment metadata: name: postgres-operator + labels: + application: postgres-operator spec: replicas: 1 + strategy: + type: "Recreate" selector: matchLabels: name: postgres-operator @@ -15,7 +19,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.4.0 + image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.0 imagePullPolicy: IfNotPresent resources: requests: @@ -28,6 +32,7 @@ spec: runAsUser: 1000 runAsNonRoot: true readOnlyRootFilesystem: true + allowPrivilegeEscalation: false env: # provided additional ENV vars can overwrite individual config map entries - name: CONFIG_MAP_NAME @@ -35,3 +40,6 @@ spec: # In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above # - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT # value: postgresql-operator-default-configuration + # Define an ID to isolate controllers from each other + # - name: CONTROLLER_ID + # value: "second-operator" diff --git a/manifests/postgres-pod-priority-class.yaml b/manifests/postgres-pod-priority-class.yaml new file mode 100644 index 000000000..f1b565f21 --- /dev/null +++ b/manifests/postgres-pod-priority-class.yaml @@ -0,0 +1,11 @@ +apiVersion: scheduling.k8s.io/v1 +description: 'This priority class must be used only for databases controlled by the + Postgres operator' +kind: PriorityClass +metadata: + labels: + application: postgres-operator + name: postgres-pod-priority +preemptionPolicy: PreemptLowerPriority +globalDefault: false +value: 1000000 diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index d3ab13429..622978b06 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -3,18 +3,25 @@ kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: + docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2 # enable_crd_validation: true - etcd_host: "" - docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2 + # enable_lazy_spilo_upgrade: false + 
enable_pgversion_env_var: true # enable_shm_volume: true + enable_spilo_wal_path_compat: false + etcd_host: "" + # kubernetes_use_configmaps: false max_instances: -1 min_instances: -1 resync_period: 30m repair_period: 5m # set_memory_request_to_limit: false - # sidecar_docker_images: - # example: "exampleimage:exampletag" - workers: 4 + # sidecars: + # - image: image:123 + # name: global-sidecar-1 + # ports: + # - containerPort: 80 + workers: 8 users: replication_username: standby super_username: postgres @@ -26,11 +33,26 @@ configuration: # custom_pod_annotations: # keya: valuea # keyb: valueb + # delete_annotation_date_key: delete-date + # delete_annotation_name_key: delete-clustername + # downscaler_annotations: + # - deployment-time + # - downscaler/* enable_init_containers: true enable_pod_antiaffinity: false enable_pod_disruption_budget: true enable_sidecars: true # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" + # infrastructure_roles_secrets: + # - secretname: "monitoring-roles" + # userkey: "user" + # passwordkey: "password" + # rolekey: "inrole" + # - secretname: "other-infrastructure-role" + # userkey: "other-user-key" + # passwordkey: "other-password-key" + # inherited_annotations: + # - owned-by # inherited_labels: # - application # - environment @@ -40,9 +62,10 @@ configuration: oauth_token_secret_name: postgresql-operator pdb_name_format: "postgres-{cluster}-pdb" pod_antiaffinity_topology_key: "kubernetes.io/hostname" - # pod_environment_configmap: "" + # pod_environment_configmap: "default/my-custom-config" + # pod_environment_secret: "my-custom-secret" pod_management_policy: "ordered_ready" - # pod_priority_class_name: "" + # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role # pod_service_account_definition: "" pod_service_account_name: postgres-pod @@ -50,8 +73,11 @@ configuration: # pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + # spilo_runasuser: 101 + # spilo_runasgroup: 103 # spilo_fsgroup: 103 spilo_privileged: false + storage_resize_mode: pvc # toleration: {} # watched_namespace: "" postgres_pod_resources: @@ -69,23 +95,31 @@ configuration: resource_check_interval: 3s resource_check_timeout: 10m load_balancer: - # db_hosted_zone: "" - enable_master_load_balancer: false - enable_replica_load_balancer: false # custom_service_annotations: # keyx: valuex # keyy: valuey + # db_hosted_zone: "" + enable_master_load_balancer: false + enable_replica_load_balancer: false + external_traffic_policy: "Cluster" master_dns_name_format: "{cluster}.{team}.{hostedzone}" replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" aws_or_gcp: # additional_secret_mount: "some-secret-name" # additional_secret_mount_path: "/some/dir" aws_region: eu-central-1 + enable_ebs_gp3_migration: false + # enable_ebs_gp3_migration_max_size: 1000 + # gcp_credentials: "" # kube_iam_role: "" # log_s3_bucket: "" + # wal_gs_bucket: "" # wal_s3_bucket: "" logical_backup: - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" + logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0" + # logical_backup_google_application_credentials: "" + logical_backup_job_prefix: "logical-backup-" + logical_backup_provider: "s3" # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" # logical_backup_s3_endpoint: "" @@ -98,6 +132,8 @@ configuration: enable_database_access: true 
teams_api: # enable_admin_role_for_users: true + # enable_postgres_team_crd: false + # enable_postgres_team_crd_superusers: false enable_team_superuser: false enable_teams_api: false # pam_configuration: "" @@ -111,14 +147,17 @@ configuration: log_statement: all # teams_api_url: "" logging_rest_api: - api_port: 8008 + api_port: 8080 cluster_history_entries: 1000 ring_log_lines: 100 - scalyr: - # scalyr_api_key: "" - scalyr_cpu_limit: "1" - scalyr_cpu_request: 100m - # scalyr_image: "" - scalyr_memory_limit: 500Mi - scalyr_memory_request: 50Mi - # scalyr_server_url: "" + connection_pooler: + connection_pooler_default_cpu_limit: "1" + connection_pooler_default_cpu_request: "500m" + connection_pooler_default_memory_limit: 100Mi + connection_pooler_default_memory_request: 100Mi + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9" + # connection_pooler_max_db_connections: 60 + connection_pooler_mode: "transaction" + connection_pooler_number_of_instances: 2 + # connection_pooler_schema: "pooler" + # connection_pooler_user: "pooler" diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 453916b26..61a04144c 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: postgresqls.acid.zalan.do @@ -11,325 +11,561 @@ spec: singular: postgresql shortNames: - pg + categories: + - all scope: Namespaced - subresources: - status: {} - version: v1 - validation: - openAPIV3Schema: - type: object - required: - - kind - - apiVersion - - spec - properties: - kind: - type: string - enum: - - postgresql - apiVersion: - type: string - enum: - - acid.zalan.do/v1 - spec: - type: object - required: - - numberOfInstances - - teamId - - postgresql - properties: - allowedSourceRanges: - type: array - nullable: true - items: - type: string - pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' - clone: - type: object - required: - - cluster - properties: - cluster: - type: string - s3_endpoint: - type: string - s3_access_key_id: - type: string - s3_secret_access_key: - type: string - s3_force_path_style: - type: boolean - s3_wal_path: - type: string - timestamp: - type: string - pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' - # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC - # Example: 1996-12-19T16:39:57-08:00 - # Note: this field requires a timezone - uid: - format: uuid - type: string - databases: - type: object - additionalProperties: - type: string - # Note: usernames specified here as database owners must be declared in the users key of the spec key. 
- dockerImage: - type: string - enableLogicalBackup: - type: boolean - enableMasterLoadBalancer: - type: boolean - enableReplicaLoadBalancer: - type: boolean - enableShmVolume: - type: boolean - init_containers: # deprecated - type: array - nullable: true - items: - type: object - additionalProperties: true - initContainers: - type: array - nullable: true - items: - type: object - additionalProperties: true - logicalBackupSchedule: - type: string - pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' - maintenanceWindows: - type: array - items: - type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' - numberOfInstances: - type: integer - minimum: 0 - patroni: - type: object - properties: - initdb: + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Team + type: string + description: Team responsible for Postgres CLuster + jsonPath: .spec.teamId + - name: Version + type: string + description: PostgreSQL version + jsonPath: .spec.postgresql.version + - name: Pods + type: integer + description: Number of Pods per Postgres cluster + jsonPath: .spec.numberOfInstances + - name: Volume + type: string + description: Size of the bound volume + jsonPath: .spec.volume.size + - name: CPU-Request + type: string + description: Requested CPU for Postgres containers + jsonPath: .spec.resources.requests.cpu + - name: Memory-Request + type: string + description: Requested memory for Postgres containers + jsonPath: .spec.resources.requests.memory + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + - name: Status + type: string + description: Current sync status of postgresql resource + jsonPath: .status.PostgresClusterStatus + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - postgresql + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + required: + - numberOfInstances + - teamId + - postgresql + - volume + properties: + additionalVolumes: + type: array + items: type: object - additionalProperties: + required: + - name + - mountPath + - volumeSource + properties: + name: + type: string + mountPath: + type: string + targetContainers: + type: array + nullable: true + items: + type: string + volumeSource: + type: object + x-kubernetes-preserve-unknown-fields: true + subPath: + type: string + allowedSourceRanges: + type: array + nullable: true + items: + type: string + pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$' + clone: + type: object + required: + - cluster + properties: + cluster: type: string - pg_hba: - type: array - items: + s3_endpoint: type: string - slots: + s3_access_key_id: + type: string + s3_secret_access_key: + type: string + s3_force_path_style: + type: boolean + s3_wal_path: + type: string + timestamp: + type: string + pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$' + # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC + # Example: 1996-12-19T16:39:57-08:00 + # Note: this field requires a timezone + uid: + format: uuid + type: 
string + connectionPooler: + type: object + properties: + dockerImage: + type: string + maxDBConnections: + type: integer + mode: + type: string + enum: + - "session" + - "transaction" + numberOfInstances: + type: integer + minimum: 2 + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schema: + type: string + user: + type: string + databases: + type: object + additionalProperties: + type: string + # Note: usernames specified here as database owners must be declared in the users key of the spec key. + dockerImage: + type: string + enableConnectionPooler: + type: boolean + enableReplicaConnectionPooler: + type: boolean + enableLogicalBackup: + type: boolean + enableMasterLoadBalancer: + type: boolean + enableReplicaLoadBalancer: + type: boolean + enableShmVolume: + type: boolean + init_containers: # deprecated + type: array + nullable: true + items: type: object - additionalProperties: + x-kubernetes-preserve-unknown-fields: true + initContainers: + type: array + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + logicalBackupSchedule: + type: string + pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' + maintenanceWindows: + type: array + items: + type: string + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + numberOfInstances: + type: integer + minimum: 0 + patroni: + type: object + properties: + initdb: type: object additionalProperties: type: string - ttl: - type: integer - loop_wait: - type: integer - retry_timeout: - type: integer - maximum_lag_on_failover: - type: integer - podAnnotations: - type: object - additionalProperties: - type: string - pod_priority_class_name: # deprecated - type: string - podPriorityClassName: - type: string - postgresql: - type: object - required: - - version - properties: - version: - type: string - enum: - - "9.3" - - "9.4" - - "9.5" - - "9.6" - - "10" - - "11" - - "12" - parameters: - type: object - additionalProperties: - type: string - replicaLoadBalancer: # deprecated - type: boolean - resources: - type: object - required: - - requests - - limits - properties: - limits: - type: object - required: - - cpu - - memory - properties: - cpu: + loop_wait: + type: integer + maximum_lag_on_failover: + type: integer + pg_hba: + type: array + items: type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. 
- memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be lower - # than the corresponding request. - requests: - type: object - required: - - cpu - - memory - properties: - cpu: - type: string - # Decimal natural followed by m, or decimal natural followed by - # dot followed by up to three decimal digits. - # - # This is because the Kubernetes CPU resource has millis as the - # maximum precision. The actual values are checked in code - # because the regular expression would be huge and horrible and - # not very helpful in validation error messages; this one checks - # only the format of the given number. - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - memory: - type: string - # You can express memory as a plain integer or as a fixed-point - # integer using one of these suffixes: E, P, T, G, M, k. You can - # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki - # - # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero or be higher - # than the corresponding limit. - serviceAnnotations: - type: object - additionalProperties: - type: string - sidecars: - type: array - nullable: true - items: + retry_timeout: + type: integer + slots: + type: object + additionalProperties: + type: object + additionalProperties: + type: string + synchronous_mode: + type: boolean + synchronous_mode_strict: + type: boolean + ttl: + type: integer + podAnnotations: type: object - additionalProperties: true - spiloFSGroup: - type: integer - standby: - type: object - required: - - s3_wal_path - properties: - s3_wal_path: + additionalProperties: type: string - teamId: - type: string - tolerations: - type: array - items: + pod_priority_class_name: # deprecated + type: string + podPriorityClassName: + type: string + postgresql: type: object required: - - key - - operator - - effect + - version properties: - key: - type: string - operator: + version: type: string enum: - - Equal - - Exists - value: - type: string - effect: - type: string - enum: - - NoExecute - - NoSchedule - - PreferNoSchedule - tolerationSeconds: - type: integer - useLoadBalancer: # deprecated - type: boolean - users: - type: object - additionalProperties: + - "9.3" + - "9.4" + - "9.5" + - "9.6" + - "10" + - "11" + - "12" + - "13" + parameters: + type: object + additionalProperties: + type: string + preparedDatabases: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + extensions: + type: object + additionalProperties: + type: string + schemas: + type: object + additionalProperties: + type: object + properties: + defaultUsers: + type: boolean + defaultRoles: + type: boolean + replicaLoadBalancer: # deprecated + type: boolean + resources: + type: object + required: + - requests + - limits + properties: + limits: + type: object + required: + - cpu + - 
memory + properties: + cpu: + type: string + # Decimal natural followed by m, or decimal natural followed by + # dot followed by up to three decimal digits. + # + # This is because the Kubernetes CPU resource has millis as the + # maximum precision. The actual values are checked in code + # because the regular expression would be huge and horrible and + # not very helpful in validation error messages; this one checks + # only the format of the given number. + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + # Note: the value specified here must not be zero or be lower + # than the corresponding request. + memory: + type: string + # You can express memory as a plain integer or as a fixed-point + # integer using one of these suffixes: E, P, T, G, M, k. You can + # also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki + # + # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero or be higher + # than the corresponding limit. + requests: + type: object + required: + - cpu + - memory + properties: + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + schedulerName: + type: string + serviceAnnotations: + type: object + additionalProperties: + type: string + sidecars: type: array nullable: true - description: "Role flags specified here must not contradict each other" items: - type: string - enum: - - bypassrls - - BYPASSRLS - - nobypassrls - - NOBYPASSRLS - - createdb - - CREATEDB - - nocreatedb - - NOCREATEDB - - createrole - - CREATEROLE - - nocreaterole - - NOCREATEROLE - - inherit - - INHERIT - - noinherit - - NOINHERIT - - login - - LOGIN - - nologin - - NOLOGIN - - replication - - REPLICATION - - noreplication - - NOREPLICATION - - superuser - - SUPERUSER - - nosuperuser - - NOSUPERUSER - volume: - type: object - required: - - size - properties: - size: - type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - # Note: the value specified here must not be zero. 
- storageClass: - type: string - subPath: - type: string - status: - type: object - additionalProperties: - type: string + type: object + x-kubernetes-preserve-unknown-fields: true + spiloRunAsUser: + type: integer + spiloRunAsGroup: + type: integer + spiloFSGroup: + type: integer + standby: + type: object + required: + - s3_wal_path + properties: + s3_wal_path: + type: string + teamId: + type: string + tls: + type: object + required: + - secretName + properties: + secretName: + type: string + certificateFile: + type: string + privateKeyFile: + type: string + caFile: + type: string + caSecretName: + type: string + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + required: + - weight + - preference + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + format: int32 + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + required: + - key + - operator + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + tolerations: + type: array + items: + type: object + required: + - key + - operator + - effect + properties: + key: + type: string + operator: + type: string + enum: + - Equal + - Exists + value: + type: string + effect: + type: string + enum: + - NoExecute + - NoSchedule + - PreferNoSchedule + tolerationSeconds: + type: integer + useLoadBalancer: # deprecated + type: boolean + users: + type: object + additionalProperties: + type: array + nullable: true + description: "Role flags specified here must not contradict each other" + items: + type: string + enum: + - bypassrls + - BYPASSRLS + - nobypassrls + - NOBYPASSRLS + - createdb + - CREATEDB + - nocreatedb + - NOCREATEDB + - createrole + - CREATEROLE + - nocreaterole + - NOCREATEROLE + - inherit + - INHERIT + - noinherit + - NOINHERIT + - login + - LOGIN + - nologin + - NOLOGIN + - replication + - REPLICATION + - noreplication + - NOREPLICATION + - superuser + - SUPERUSER + - nosuperuser + - NOSUPERUSER + volume: + type: object + required: + - size + properties: + iops: + type: integer + size: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + # Note: the value specified here must not be zero. 
+ storageClass: + type: string + subPath: + type: string + throughput: + type: integer + status: + type: object + additionalProperties: + type: string diff --git a/manifests/postgresteam.crd.yaml b/manifests/postgresteam.crd.yaml new file mode 100644 index 000000000..2588e53b1 --- /dev/null +++ b/manifests/postgresteam.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: postgresteams.acid.zalan.do +spec: + group: acid.zalan.do + names: + kind: PostgresTeam + listKind: PostgresTeamList + plural: postgresteams + singular: postgresteam + shortNames: + - pgteam + categories: + - all + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - PostgresTeam + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + properties: + additionalSuperuserTeams: + type: object + description: "Map for teamId and associated additional superuser teams" + additionalProperties: + type: array + nullable: true + description: "List of teams to become Postgres superusers" + items: + type: string + additionalTeams: + type: object + description: "Map for teamId and associated additional teams" + additionalProperties: + type: array + nullable: true + description: "List of teams whose members will also be added to the Postgres cluster" + items: + type: string + additionalMembers: + type: object + description: "Map for teamId and associated additional users" + additionalProperties: + type: array + nullable: true + description: "List of users who will also be added to the Postgres cluster" + items: + type: string diff --git a/manifests/standby-manifest.yaml b/manifests/standby-manifest.yaml index 2b621bd10..593f409ec 100644 --- a/manifests/standby-manifest.yaml +++ b/manifests/standby-manifest.yaml @@ -9,7 +9,7 @@ spec: size: 1Gi numberOfInstances: 1 postgresql: - version: "11" + version: "13" # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming. 
standby: s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/" diff --git a/mkdocs.yml b/mkdocs.yml index 34f55fac8..b8e8c3e04 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,4 +13,3 @@ nav: - Config parameters: 'reference/operator_parameters.md' - Manifest parameters: 'reference/cluster_manifest.md' - CLI options and environment: 'reference/command_line_and_environment.md' - - Google Summer of Code 2019: 'gsoc-2019/ideas.md' diff --git a/mocks/mocks.go b/mocks/mocks.go new file mode 100644 index 000000000..f726b26e5 --- /dev/null +++ b/mocks/mocks.go @@ -0,0 +1 @@ +package mocks diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 4a2c6f348..d1dd0849d 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -2,7 +2,8 @@ package v1 import ( acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "github.com/zalando/postgres-operator/pkg/util" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -20,49 +21,49 @@ const ( ) // PostgresCRDResourceColumns definition of AdditionalPrinterColumns for postgresql CRD -var PostgresCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{ - apiextv1beta1.CustomResourceColumnDefinition{ +var PostgresCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ + { Name: "Team", Type: "string", Description: "Team responsible for Postgres cluster", JSONPath: ".spec.teamId", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Version", Type: "string", Description: "PostgreSQL version", JSONPath: ".spec.postgresql.version", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Pods", Type: "integer", Description: "Number of Pods per Postgres cluster", JSONPath: ".spec.numberOfInstances", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Volume", Type: "string", Description: "Size of the bound volume", JSONPath: ".spec.volume.size", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "CPU-Request", Type: "string", Description: "Requested CPU for Postgres containers", JSONPath: ".spec.resources.requests.cpu", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Memory-Request", Type: "string", Description: "Requested memory for Postgres containers", JSONPath: ".spec.resources.requests.memory", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Age", Type: "date", JSONPath: ".metadata.creationTimestamp", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Status", Type: "string", Description: "Current sync status of postgresql resource", @@ -71,32 +72,32 @@ var PostgresCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{ } // OperatorConfigCRDResourceColumns definition of AdditionalPrinterColumns for OperatorConfiguration CRD -var OperatorConfigCRDResourceColumns = []apiextv1beta1.CustomResourceColumnDefinition{ - apiextv1beta1.CustomResourceColumnDefinition{ +var OperatorConfigCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition{ + { Name: "Image", Type: "string", Description: "Spilo image to be used for Pods", JSONPath: ".configuration.docker_image", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Cluster-Label", Type: "string", Description: "Label for K8s resources created by operator", JSONPath: ".configuration.kubernetes.cluster_name_label", }, - 
apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Service-Account", Type: "string", Description: "Name of service account to be used", JSONPath: ".configuration.kubernetes.pod_service_account_name", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Min-Instances", Type: "integer", Description: "Minimum number of instances per Postgres cluster", JSONPath: ".configuration.min_instances", }, - apiextv1beta1.CustomResourceColumnDefinition{ + { Name: "Age", Type: "date", JSONPath: ".metadata.creationTimestamp", @@ -108,14 +109,14 @@ var min1 = 1.0 var minDisable = -1.0 // PostgresCRDResourceValidation to check applied manifest parameters -var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextv1beta1.JSONSchemaProps{ +var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextv1.JSONSchemaProps{ Type: "object", Required: []string{"kind", "apiVersion", "spec"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "kind": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"postgresql"`), }, @@ -123,7 +124,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "apiVersion": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"acid.zalan.do/v1"`), }, @@ -131,13 +132,45 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "spec": { Type: "object", - Required: []string{"numberOfInstances", "teamId", "postgresql"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Required: []string{"numberOfInstances", "teamId", "postgresql", "volume"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "additionalVolumes": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Required: []string{"name", "mountPath", "volumeSource"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "name": { + Type: "string", + }, + "mountPath": { + Type: "string", + }, + "targetContainers": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "volumeSource": { + Type: "object", + XPreserveUnknownFields: util.True(), + }, + "subPath": { + Type: "string", + }, + }, + }, + }, + }, "allowedSourceRanges": { Type: "array", Nullable: true, - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Pattern: "^(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\/(\\d|[1-2]\\d|3[0-2])$", }, @@ -146,7 +179,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "clone": { Type: "object", Required: []string{"cluster"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cluster": { Type: "string", }, @@ -168,7 +201,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "timestamp": { Type: "string", Description: "Date-time format that specifies a timezone as an offset relative to UTC e.g. 
1996-12-19T16:39:57-08:00", - Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([Zz])|([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$", + Pattern: "^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$", }, "uid": { Type: "string", @@ -176,10 +209,80 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, + "connectionPooler": { + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "dockerImage": { + Type: "string", + }, + "maxDBConnections": { + Type: "integer", + }, + "mode": { + Type: "string", + Enum: []apiextv1.JSON{ + { + Raw: []byte(`"session"`), + }, + { + Raw: []byte(`"transaction"`), + }, + }, + }, + "numberOfInstances": { + Type: "integer", + Minimum: &min1, + }, + "resources": { + Type: "object", + Required: []string{"requests", "limits"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "limits": { + Type: "object", + Required: []string{"cpu", "memory"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "cpu": { + Type: "string", + Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + }, + "memory": { + Type: "string", + Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). Must be greater than 0", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + }, + }, + "requests": { + Type: "object", + Required: []string{"cpu", "memory"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "cpu": { + Type: "string", + Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + }, + "memory": { + Type: "string", + Description: "Plain integer or fixed-point integer using one of these suffixes: E, P, T, G, M, k (with or without a tailing i). 
Must be greater than 0", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + }, + }, + }, + }, + "schema": { + Type: "string", + }, + "user": { + Type: "string", + }, + }, + }, "databases": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Description: "User names specified here as database owners must be declared in the users key of the spec key", }, @@ -188,6 +291,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "dockerImage": { Type: "string", }, + "enableConnectionPooler": { + Type: "boolean", + }, + "enableReplicaConnectionPooler": { + Type: "boolean", + }, "enableLogicalBackup": { Type: "boolean", }, @@ -203,23 +312,19 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "init_containers": { Type: "array", Description: "Deprecated", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, "initContainers": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, @@ -229,8 +334,8 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "maintenanceWindows": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", Pattern: "^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$", }, @@ -242,54 +347,60 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "patroni": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "initdb": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "loop_wait": { + Type: "integer", + }, + "maximum_lag_on_failover": { + Type: "integer", + }, "pg_hba": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "retry_timeout": { + Type: "integer", + }, "slots": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: 
"string", }, }, }, }, }, + "synchronous_mode": { + Type: "boolean", + }, + "synchronous_mode_strict": { + Type: "boolean", + }, "ttl": { Type: "integer", }, - "loop_wait": { - Type: "integer", - }, - "retry_timeout": { - Type: "integer", - }, - "maximum_lag_on_failover": { - Type: "integer", - }, }, }, "podAnnotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -304,10 +415,10 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "postgresql": { Type: "object", Required: []string{"version"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "version": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"9.3"`), }, @@ -329,18 +440,58 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ { Raw: []byte(`"12"`), }, + { + Raw: []byte(`"13"`), + }, }, }, "parameters": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, }, }, + "preparedDatabases": { + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "defaultUsers": { + Type: "boolean", + }, + "extensions": { + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "schemas": { + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "defaultUsers": { + Type: "boolean", + }, + "defaultRoles": { + Type: "boolean", + }, + }, + }, + }, + }, + }, + }, + }, + }, "replicaLoadBalancer": { Type: "boolean", Description: "Deprecated", @@ -348,11 +499,11 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "resources": { Type: "object", Required: []string{"requests", "limits"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "limits": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). Must be greater than 0", @@ -368,7 +519,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "requests": { Type: "object", Required: []string{"cpu", "memory"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cpu": { Type: "string", Description: "Decimal natural followed by m, or decimal natural followed by dot followed by up to three decimal digits (precision used by Kubernetes). 
Must be greater than 0", @@ -383,32 +534,39 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, + "schedulerName": { + Type: "string", + }, "serviceAnnotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, "sidecars": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ - Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Allows: true, - }, + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), }, }, }, + "spiloRunAsUser": { + Type: "integer", + }, + "spiloRunAsGroup": { + Type: "integer", + }, "spiloFSGroup": { Type: "integer", }, "standby": { Type: "object", Required: []string{"s3_wal_path"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "s3_wal_path": { Type: "string", }, @@ -417,19 +575,125 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "teamId": { Type: "string", }, + "tls": { + Type: "object", + Required: []string{"secretName"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "secretName": { + Type: "string", + }, + "certificateFile": { + Type: "string", + }, + "privateKeyFile": { + Type: "string", + }, + "caFile": { + Type: "string", + }, + "caSecretName": { + Type: "string", + }, + }, + }, + "nodeAffinity": { + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "preferredDuringSchedulingIgnoredDuringExecution": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Required: []string{"preference, weight"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "preference": { + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "matchExpressions": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, + "matchFields": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, + }, + }, + "weight": { + Type: "integer", + Format: "int32", + }, + }, + }, + }, + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + Type: "object", + Required: []string{"nodeSelectorTerms"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "nodeSelectorTerms": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "matchExpressions": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, + "matchFields": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "tolerations": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: 
&apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "object", Required: []string{"key", "operator", "effect"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "key": { Type: "string", }, "operator": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"Equal"`), }, @@ -443,7 +707,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "effect": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"NoExecute"`), }, @@ -468,15 +732,15 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, "users": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "array", Description: "Role flags specified here must not contradict each other", Nullable: true, - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"bypassrls"`), }, @@ -570,7 +834,10 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "volume": { Type: "object", Required: []string{"size"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ + "iops": { + Type: "integer", + }, "size": { Type: "string", Description: "Value must not be zero", @@ -582,14 +849,17 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ "subPath": { Type: "string", }, + "throughput": { + Type: "integer", + }, }, }, }, }, "status": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -599,14 +869,14 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ } // OperatorConfigCRDResourceValidation to check applied manifest parameters -var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextv1beta1.JSONSchemaProps{ +var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextv1.JSONSchemaProps{ Type: "object", Required: []string{"kind", "apiVersion", "configuration"}, - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "kind": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"OperatorConfiguration"`), }, @@ -614,7 +884,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "apiVersion": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"acid.zalan.do/v1"`), }, @@ -622,19 +892,28 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "configuration": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "docker_image": { Type: "string", }, "enable_crd_validation": { Type: "boolean", }, + "enable_lazy_spilo_upgrade": { + Type: "boolean", + }, "enable_shm_volume": { Type: "boolean", }, + 
"enable_spilo_wal_path_compat": { + Type: "boolean", + }, "etcd_host": { Type: "string", }, + "kubernetes_use_configmaps": { + Type: "boolean", + }, "max_instances": { Type: "integer", Description: "-1 = disabled", @@ -656,19 +935,28 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "sidecar_docker_images": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, + "sidecars": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: util.True(), + }, + }, + }, "workers": { Type: "integer", Minimum: &min1, }, "users": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "replication_username": { Type: "string", }, @@ -679,14 +967,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "kubernetes": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "cluster_domain": { Type: "string", }, "cluster_labels": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -696,8 +984,22 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "custom_pod_annotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "delete_annotation_date_key": { + Type: "string", + }, + "delete_annotation_name_key": { + Type: "string", + }, + "downscaler_annotations": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -717,10 +1019,53 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "infrastructure_roles_secret_name": { Type: "string", }, + "infrastructure_roles_secrets": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "object", + Required: []string{"secretname", "userkey", "passwordkey"}, + Properties: map[string]apiextv1.JSONSchemaProps{ + "secretname": { + Type: "string", + }, + "userkey": { + Type: "string", + }, + "passwordkey": { + Type: "string", + }, + "rolekey": { + Type: "string", + }, + "defaultuservalue": { + Type: "string", + }, + "defaultrolevalue": { + Type: "string", + }, + "details": { + Type: "string", + }, + "template": { + Type: "boolean", + }, + }, + }, + }, + }, + "inherited_annotations": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "inherited_labels": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -730,8 +1075,8 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "node_readiness_label": { Type: "object", - AdditionalProperties: 
&apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -748,9 +1093,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "pod_environment_configmap": { Type: "string", }, + "pod_environment_secret": { + Type: "string", + }, "pod_management_policy": { Type: "string", - Enum: []apiextv1beta1.JSON{ + Enum: []apiextv1.JSON{ { Raw: []byte(`"ordered_ready"`), }, @@ -783,16 +1131,36 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "secret_name_template": { Type: "string", }, + "spilo_runasuser": { + Type: "integer", + }, + "spilo_runasgroup": { + Type: "integer", + }, "spilo_fsgroup": { Type: "integer", }, "spilo_privileged": { Type: "boolean", }, + "storage_resize_mode": { + Type: "string", + Enum: []apiextv1.JSON{ + { + Raw: []byte(`"ebs"`), + }, + { + Raw: []byte(`"pvc"`), + }, + { + Raw: []byte(`"off"`), + }, + }, + }, "toleration": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -804,7 +1172,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "postgres_pod_resources": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "default_cpu_limit": { Type: "string", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", @@ -833,7 +1201,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "timeouts": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "pod_label_wait_timeout": { Type: "string", }, @@ -856,11 +1224,11 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "load_balancer": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "custom_service_annotations": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -874,6 +1242,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "enable_replica_load_balancer": { Type: "boolean", }, + "external_traffic_policy": { + Type: "string", + Enum: []apiextv1.JSON{ + { + Raw: []byte(`"Cluster"`), + }, + { + Raw: []byte(`"Local"`), + }, + }, + }, "master_dns_name_format": { Type: "string", }, @@ -884,7 +1263,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "aws_or_gcp": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "additional_secret_mount": { Type: "string", }, @@ -894,6 +1273,15 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "aws_region": { Type: "string", }, + "enable_ebs_gp3_migration": { + Type: "boolean", + }, + "enable_ebs_gp3_migration_max_size": { + Type: "integer", + }, + "gcp_credentials": { + Type: "string", + }, "kube_iam_role": { Type: "string", }, @@ -907,10 +1295,19 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "logical_backup": 
{ Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "logical_backup_docker_image": { Type: "string", }, + "logical_backup_google_application_credentials": { + Type: "string", + }, + "logical_backup_job_prefix": { + Type: "string", + }, + "logical_backup_provider": { + Type: "string", + }, "logical_backup_s3_access_key_id": { Type: "string", }, @@ -937,7 +1334,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "debug": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "debug_logging": { Type: "boolean", }, @@ -948,10 +1345,16 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "teams_api": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "enable_admin_role_for_users": { Type: "boolean", }, + "enable_postgres_team_crd": { + Type: "boolean", + }, + "enable_postgres_team_crd_superusers": { + Type: "boolean", + }, "enable_team_superuser": { Type: "boolean", }, @@ -966,16 +1369,16 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "postgres_superuser_teams": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, }, "protected_role_names": { Type: "array", - Items: &apiextv1beta1.JSONSchemaPropsOrArray{ - Schema: &apiextv1beta1.JSONSchemaProps{ + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -985,8 +1388,8 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "team_api_role_configuration": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -998,7 +1401,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "logging_rest_api": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "api_port": { Type: "integer", }, @@ -1012,7 +1415,7 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, "scalyr": { Type: "object", - Properties: map[string]apiextv1beta1.JSONSchemaProps{ + Properties: map[string]apiextv1.JSONSchemaProps{ "scalyr_api_key": { Type: "string", }, @@ -1040,12 +1443,60 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, }, }, + "connection_pooler": { + Type: "object", + Properties: map[string]apiextv1.JSONSchemaProps{ + "connection_pooler_default_cpu_limit": { + Type: "string", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + }, + "connection_pooler_default_cpu_request": { + Type: "string", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + }, + "connection_pooler_default_memory_limit": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + "connection_pooler_default_memory_request": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + "connection_pooler_image": { + Type: "string", + }, + "connection_pooler_max_db_connections": { + Type: "integer", + }, + "connection_pooler_mode": { + Type: "string", + 
Enum: []apiextv1.JSON{ + { + Raw: []byte(`"session"`), + }, + { + Raw: []byte(`"transaction"`), + }, + }, + }, + "connection_pooler_number_of_instances": { + Type: "integer", + Minimum: &min1, + }, + "connection_pooler_schema": { + Type: "string", + }, + "connection_pooler_user": { + Type: "string", + }, + }, + }, }, }, "status": { Type: "object", - AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{ - Schema: &apiextv1beta1.JSONSchemaProps{ + AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{ + Schema: &apiextv1.JSONSchemaProps{ Type: "string", }, }, @@ -1054,32 +1505,39 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation }, } -func buildCRD(name, kind, plural, short string, columns []apiextv1beta1.CustomResourceColumnDefinition, validation apiextv1beta1.CustomResourceValidation) *apiextv1beta1.CustomResourceDefinition { - return &apiextv1beta1.CustomResourceDefinition{ +func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourceColumnDefinition, validation apiextv1.CustomResourceValidation) *apiextv1.CustomResourceDefinition { + return &apiextv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: apiextv1beta1.CustomResourceDefinitionSpec{ - Group: SchemeGroupVersion.Group, - Version: SchemeGroupVersion.Version, - Names: apiextv1beta1.CustomResourceDefinitionNames{ + Spec: apiextv1.CustomResourceDefinitionSpec{ + Group: SchemeGroupVersion.Group, + Names: apiextv1.CustomResourceDefinitionNames{ Plural: plural, ShortNames: []string{short}, Kind: kind, + Categories: []string{"all"}, }, - Scope: apiextv1beta1.NamespaceScoped, - Subresources: &apiextv1beta1.CustomResourceSubresources{ - Status: &apiextv1beta1.CustomResourceSubresourceStatus{}, + Scope: apiextv1.NamespaceScoped, + Versions: []apiextv1.CustomResourceDefinitionVersion{ + { + Name: SchemeGroupVersion.Version, + Served: true, + Storage: true, + Subresources: &apiextv1.CustomResourceSubresources{ + Status: &apiextv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: columns, + Schema: &validation, + }, }, - AdditionalPrinterColumns: columns, - Validation: &validation, }, } } // PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource -func PostgresCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition { - postgresCRDvalidation := apiextv1beta1.CustomResourceValidation{} +func PostgresCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { + postgresCRDvalidation := apiextv1.CustomResourceValidation{} if enableValidation != nil && *enableValidation { postgresCRDvalidation = PostgresCRDResourceValidation @@ -1094,8 +1552,8 @@ func PostgresCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition } // ConfigurationCRD returns CustomResourceDefinition built from OperatorConfigCRDResource -func ConfigurationCRD(enableValidation *bool) *apiextv1beta1.CustomResourceDefinition { - opconfigCRDvalidation := apiextv1beta1.CustomResourceValidation{} +func ConfigurationCRD(enableValidation *bool) *apiextv1.CustomResourceDefinition { + opconfigCRDvalidation := apiextv1.CustomResourceValidation{} if enableValidation != nil && *enableValidation { opconfigCRDvalidation = OperatorConfigCRDResourceValidation diff --git a/pkg/apis/acid.zalan.do/v1/marshal.go b/pkg/apis/acid.zalan.do/v1/marshal.go index d180f784c..9521082fc 100644 --- a/pkg/apis/acid.zalan.do/v1/marshal.go +++ b/pkg/apis/acid.zalan.do/v1/marshal.go @@ -102,7 +102,7 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { } 
tmp.Error = err.Error() - tmp.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} + tmp.Status.PostgresClusterStatus = ClusterStatusInvalid *p = Postgresql(tmp) @@ -113,9 +113,10 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { tmp2.Error = err.Error() tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} - } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil { + } else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil { + tmp2.Error = err.Error() - tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} + tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid } else { tmp2.Spec.ClusterName = clusterName } diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index e126b6fb6..881746d9f 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -1,11 +1,14 @@ package v1 +// Operator configuration CRD definition, please use snake_case for field names. + import ( "github.com/zalando/postgres-operator/pkg/util/config" "time" "github.com/zalando/postgres-operator/pkg/spec" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -42,36 +45,44 @@ type PostgresUsersConfiguration struct { type KubernetesMetaConfiguration struct { PodServiceAccountName string `json:"pod_service_account_name,omitempty"` // TODO: change it to the proper json - PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` - PodServiceAccountRoleDefinition string `json:"pod_service_account_role_definition,omitempty"` - PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` - PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` - SpiloPrivileged bool `json:"spilo_privileged,omitempty"` - SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` - WatchedNamespace string `json:"watched_namespace,omitempty"` - PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` - EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` - EnableInitContainers *bool `json:"enable_init_containers,omitempty"` - EnableSidecars *bool `json:"enable_sidecars,omitempty"` - SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` - ClusterDomain string `json:"cluster_domain"` - OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` - InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` - PodRoleLabel string `json:"pod_role_label,omitempty"` - ClusterLabels map[string]string `json:"cluster_labels,omitempty"` - InheritedLabels []string `json:"inherited_labels,omitempty"` - ClusterNameLabel string `json:"cluster_name_label,omitempty"` - NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` - CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"` + PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` + PodServiceAccountRoleDefinition string `json:"pod_service_account_role_definition,omitempty"` + PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` + PodTerminateGracePeriod Duration 
`json:"pod_terminate_grace_period,omitempty"` + SpiloPrivileged bool `json:"spilo_privileged,omitempty"` + SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"` + SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` + SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` + WatchedNamespace string `json:"watched_namespace,omitempty"` + PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` + EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` + StorageResizeMode string `json:"storage_resize_mode,omitempty"` + EnableInitContainers *bool `json:"enable_init_containers,omitempty"` + EnableSidecars *bool `json:"enable_sidecars,omitempty"` + SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` + ClusterDomain string `json:"cluster_domain,omitempty"` + OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` + InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` + InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"` + PodRoleLabel string `json:"pod_role_label,omitempty"` + ClusterLabels map[string]string `json:"cluster_labels,omitempty"` + InheritedLabels []string `json:"inherited_labels,omitempty"` + InheritedAnnotations []string `json:"inherited_annotations,omitempty"` + DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"` + ClusterNameLabel string `json:"cluster_name_label,omitempty"` + DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"` + DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"` + NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` + CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"` // TODO: use a proper toleration structure? 
- PodToleration map[string]string `json:"toleration,omitempty"` - // TODO: use namespacedname - PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"` - PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` - MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` - EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` - PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` - PodManagementPolicy string `json:"pod_management_policy,omitempty"` + PodToleration map[string]string `json:"toleration,omitempty"` + PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"` + PodEnvironmentSecret string `json:"pod_environment_secret,omitempty"` + PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` + MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"` + EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` + PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` + PodManagementPolicy string `json:"pod_management_policy,omitempty"` } // PostgresPodResourcesDefaults defines the spec of default resources @@ -102,17 +113,22 @@ type LoadBalancerConfiguration struct { CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"` MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"` ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"` + ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"` } // AWSGCPConfiguration defines the configuration for AWS // TODO complete Google Cloud Platform (GCP) configuration type AWSGCPConfiguration struct { - WALES3Bucket string `json:"wal_s3_bucket,omitempty"` - AWSRegion string `json:"aws_region,omitempty"` - LogS3Bucket string `json:"log_s3_bucket,omitempty"` - KubeIAMRole string `json:"kube_iam_role,omitempty"` - AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` - AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` + WALES3Bucket string `json:"wal_s3_bucket,omitempty"` + AWSRegion string `json:"aws_region,omitempty"` + WALGSBucket string `json:"wal_gs_bucket,omitempty"` + GCPCredentials string `json:"gcp_credentials,omitempty"` + LogS3Bucket string `json:"log_s3_bucket,omitempty"` + KubeIAMRole string `json:"kube_iam_role,omitempty"` + AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` + AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` + EnableEBSGp3Migration bool `json:"enable_ebs_gp3_migration" default:"false"` + EnableEBSGp3MigrationMaxSize int64 `json:"enable_ebs_gp3_migration_max_size" default:"1000"` } // OperatorDebugConfiguration defines options for the debug mode @@ -123,16 +139,18 @@ type OperatorDebugConfiguration struct { // TeamsAPIConfiguration defines the configuration of TeamsAPI type TeamsAPIConfiguration struct { - EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` - TeamsAPIUrl string `json:"teams_api_url,omitempty"` - TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` - EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` - EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` - TeamAdminRole string `json:"team_admin_role,omitempty"` - PamRoleName string `json:"pam_role_name,omitempty"` - PamConfiguration string 
`json:"pam_configuration,omitempty"` - ProtectedRoles []string `json:"protected_role_names,omitempty"` - PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` + TeamsAPIUrl string `json:"teams_api_url,omitempty"` + TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` + EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` + EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` + TeamAdminRole string `json:"team_admin_role,omitempty"` + PamRoleName string `json:"pam_role_name,omitempty"` + PamConfiguration string `json:"pam_configuration,omitempty"` + ProtectedRoles []string `json:"protected_role_names,omitempty"` + PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnablePostgresTeamCRD bool `json:"enable_postgres_team_crd,omitempty"` + EnablePostgresTeamCRDSuperusers bool `json:"enable_postgres_team_crd_superusers,omitempty"` } // LoggingRESTAPIConfiguration defines Logging API conf @@ -153,22 +171,43 @@ type ScalyrConfiguration struct { ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"` } +// ConnectionPoolerConfiguration defines default configuration for connection pooler +type ConnectionPoolerConfiguration struct { + NumberOfInstances *int32 `json:"connection_pooler_number_of_instances,omitempty"` + Schema string `json:"connection_pooler_schema,omitempty"` + User string `json:"connection_pooler_user,omitempty"` + Image string `json:"connection_pooler_image,omitempty"` + Mode string `json:"connection_pooler_mode,omitempty"` + MaxDBConnections *int32 `json:"connection_pooler_max_db_connections,omitempty"` + DefaultCPURequest string `json:"connection_pooler_default_cpu_request,omitempty"` + DefaultMemoryRequest string `json:"connection_pooler_default_memory_request,omitempty"` + DefaultCPULimit string `json:"connection_pooler_default_cpu_limit,omitempty"` + DefaultMemoryLimit string `json:"connection_pooler_default_memory_limit,omitempty"` +} + // OperatorLogicalBackupConfiguration defines configuration for logical backup type OperatorLogicalBackupConfiguration struct { - Schedule string `json:"logical_backup_schedule,omitempty"` - DockerImage string `json:"logical_backup_docker_image,omitempty"` - S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` - S3Region string `json:"logical_backup_s3_region,omitempty"` - S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` - S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` - S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` - S3SSE string `json:"logical_backup_s3_sse,omitempty"` + Schedule string `json:"logical_backup_schedule,omitempty"` + DockerImage string `json:"logical_backup_docker_image,omitempty"` + BackupProvider string `json:"logical_backup_provider,omitempty"` + S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3Region string `json:"logical_backup_s3_region,omitempty"` + S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` + S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` + S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"` + S3SSE string `json:"logical_backup_s3_sse,omitempty"` + GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"` + JobPrefix string `json:"logical_backup_job_prefix,omitempty"` } // OperatorConfigurationData defines the operation config 
type OperatorConfigurationData struct { EnableCRDValidation *bool `json:"enable_crd_validation,omitempty"` + EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"` + EnablePgVersionEnvVar bool `json:"enable_pgversion_env_var,omitempty"` + EnableSpiloWalPathCompat bool `json:"enable_spilo_wal_path_compat,omitempty"` EtcdHost string `json:"etcd_host,omitempty"` + KubernetesUseConfigMaps bool `json:"kubernetes_use_configmaps,omitempty"` DockerImage string `json:"docker_image,omitempty"` Workers uint32 `json:"workers,omitempty"` MinInstances int32 `json:"min_instances,omitempty"` @@ -177,7 +216,8 @@ type OperatorConfigurationData struct { RepairPeriod Duration `json:"repair_period,omitempty"` SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"` ShmVolume *bool `json:"enable_shm_volume,omitempty"` - Sidecars map[string]string `json:"sidecar_docker_images,omitempty"` + SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers + SidecarContainers []v1.Container `json:"sidecars,omitempty"` PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` PostgresPodResources PostgresPodResourcesDefaults `json:"postgres_pod_resources"` @@ -189,6 +229,7 @@ type OperatorConfigurationData struct { LoggingRESTAPI LoggingRESTAPIConfiguration `json:"logging_rest_api"` Scalyr ScalyrConfiguration `json:"scalyr"` LogicalBackup OperatorLogicalBackupConfiguration `json:"logical_backup"` + ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"` } //Duration shortens this frequently used name diff --git a/pkg/apis/acid.zalan.do/v1/postgres_team_type.go b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go new file mode 100644 index 000000000..5697c193e --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go @@ -0,0 +1,33 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeam defines Custom Resource Definition Object for team management. +type PostgresTeam struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresTeamSpec `json:"spec"` +} + +// PostgresTeamSpec defines the specification for the PostgresTeam TPR. +type PostgresTeamSpec struct { + AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"` + AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"` + AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeamList defines a list of PostgresTeam definitions. +type PostgresTeamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []PostgresTeam `json:"items"` +} diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 07b42d4d4..7346fb0e5 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -1,5 +1,7 @@ package v1 +// Postgres CRD definition, please use CamelCase for field names. 
+ import ( "time" @@ -27,10 +29,16 @@ type PostgresSpec struct { Patroni `json:"patroni,omitempty"` Resources `json:"resources,omitempty"` + EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"` + EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"` + ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"` + TeamID string `json:"teamId"` DockerImage string `json:"dockerImage,omitempty"` - SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"` + SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"` + SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"` + SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"` // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest // in that case the var evaluates to nil and the value is taken from the operator config @@ -45,22 +53,27 @@ type PostgresSpec struct { // load balancers' source ranges are the same for master and replica services AllowedSourceRanges []string `json:"allowedSourceRanges"` - NumberOfInstances int32 `json:"numberOfInstances"` - Users map[string]UserFlags `json:"users"` - MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone CloneDescription `json:"clone"` - ClusterName string `json:"-"` - Databases map[string]string `json:"databases,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - Sidecars []Sidecar `json:"sidecars,omitempty"` - InitContainers []v1.Container `json:"initContainers,omitempty"` - PodPriorityClassName string `json:"podPriorityClassName,omitempty"` - ShmVolume *bool `json:"enableShmVolume,omitempty"` - EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` - LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` - StandbyCluster *StandbyDescription `json:"standby"` - PodAnnotations map[string]string `json:"podAnnotations"` - ServiceAnnotations map[string]string `json:"serviceAnnotations"` + NumberOfInstances int32 `json:"numberOfInstances"` + Users map[string]UserFlags `json:"users,omitempty"` + MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` + Clone *CloneDescription `json:"clone,omitempty"` + ClusterName string `json:"-"` + Databases map[string]string `json:"databases,omitempty"` + PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"` + SchedulerName *string `json:"schedulerName,omitempty"` + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Sidecars []Sidecar `json:"sidecars,omitempty"` + InitContainers []v1.Container `json:"initContainers,omitempty"` + PodPriorityClassName string `json:"podPriorityClassName,omitempty"` + ShmVolume *bool `json:"enableShmVolume,omitempty"` + EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` + LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` + StandbyCluster *StandbyDescription `json:"standby,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + TLS *TLSDescription `json:"tls,omitempty"` + AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"` // deprecated json tags InitContainersOld []v1.Container `json:"init_containers,omitempty"` @@ -77,6 +90,19 @@ type PostgresqlList struct { Items []Postgresql `json:"items"` } +// PreparedDatabase describes elements to be bootstrapped +type 
PreparedDatabase struct { + PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"` + DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"` + Extensions map[string]string `json:"extensions,omitempty"` +} + +// PreparedSchema describes elements to be bootstrapped per schema +type PreparedSchema struct { + DefaultRoles *bool `json:"defaultRoles,omitempty" defaults:"true"` + DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"` +} + // MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster. type MaintenanceWindow struct { Everyday bool @@ -88,14 +114,26 @@ type MaintenanceWindow struct { // Volume describes a single volume in the manifest. type Volume struct { Size string `json:"size"` - StorageClass string `json:"storageClass"` + StorageClass string `json:"storageClass,omitempty"` SubPath string `json:"subPath,omitempty"` + Iops *int64 `json:"iops,omitempty"` + Throughput *int64 `json:"throughput,omitempty"` + VolumeType string `json:"type,omitempty"` +} + +// AdditionalVolume specs additional optional volumes for statefulset +type AdditionalVolume struct { + Name string `json:"name"` + MountPath string `json:"mountPath"` + SubPath string `json:"subPath,omitempty"` + TargetContainers []string `json:"targetContainers"` + VolumeSource v1.VolumeSource `json:"volumeSource"` } // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values. type PostgresqlParam struct { PgVersion string `json:"version"` - Parameters map[string]string `json:"parameters"` + Parameters map[string]string `json:"parameters,omitempty"` } // ResourceDescription describes CPU and memory resources defined for a cluster. @@ -112,20 +150,31 @@ type Resources struct { // Patroni contains Patroni-specific configuration type Patroni struct { - InitDB map[string]string `json:"initdb"` - PgHba []string `json:"pg_hba"` - TTL uint32 `json:"ttl"` - LoopWait uint32 `json:"loop_wait"` - RetryTimeout uint32 `json:"retry_timeout"` - MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 - Slots map[string]map[string]string `json:"slots"` + InitDB map[string]string `json:"initdb,omitempty"` + PgHba []string `json:"pg_hba,omitempty"` + TTL uint32 `json:"ttl,omitempty"` + LoopWait uint32 `json:"loop_wait,omitempty"` + RetryTimeout uint32 `json:"retry_timeout,omitempty"` + MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 + Slots map[string]map[string]string `json:"slots,omitempty"` + SynchronousMode bool `json:"synchronous_mode,omitempty"` + SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"` } -//StandbyCluster +// StandbyDescription contains s3 wal path type StandbyDescription struct { S3WalPath string `json:"s3_wal_path,omitempty"` } +// TLSDescription specs TLS properties +type TLSDescription struct { + SecretName string `json:"secretName,omitempty"` + CertificateFile string `json:"certificateFile,omitempty"` + PrivateKeyFile string `json:"privateKeyFile,omitempty"` + CAFile string `json:"caFile,omitempty"` + CASecretName string `json:"caSecretName,omitempty"` +} + // CloneDescription describes which cluster the new should clone and up to which point in time type CloneDescription struct { ClusterName string `json:"cluster,omitempty"` @@ -154,3 +203,24 @@ type UserFlags []string type PostgresStatus struct { 
PostgresClusterStatus string `json:"PostgresClusterStatus"` } + +// ConnectionPooler Options for connection pooler +// +// TODO: prepared snippets of configuration, one can choose via type, e.g. +// pgbouncer-large (with higher resources) or odyssey-small (with smaller +// resources) +// Type string `json:"type,omitempty"` +// +// TODO: figure out what other important parameters of the connection pooler it +// makes sense to expose. E.g. pool size (min/max boundaries), max client +// connections etc. +type ConnectionPooler struct { + NumberOfInstances *int32 `json:"numberOfInstances,omitempty"` + Schema string `json:"schema,omitempty"` + User string `json:"user,omitempty"` + Mode string `json:"mode,omitempty"` + DockerImage string `json:"dockerImage,omitempty"` + MaxDBConnections *int32 `json:"maxDBConnections,omitempty"` + + Resources `json:"resources,omitempty"` +} diff --git a/pkg/apis/acid.zalan.do/v1/register.go b/pkg/apis/acid.zalan.do/v1/register.go index 1c30e35fb..9dcbf2baf 100644 --- a/pkg/apis/acid.zalan.do/v1/register.go +++ b/pkg/apis/acid.zalan.do/v1/register.go @@ -1,11 +1,10 @@ package v1 import ( + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" ) // APIVersion of the `postgresql` and `operator` CRDs @@ -44,6 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { // TODO: User uppercase CRDResourceKind of our types in the next major API version scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeam"), &PostgresTeam{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeamList"), &PostgresTeamList{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"), &OperatorConfiguration{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"), diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index db6efcd71..a795ec685 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -72,7 +72,7 @@ func extractClusterName(clusterName string, teamName string) (string, error) { func validateCloneClusterDescription(clone *CloneDescription) error { // when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name - if clone.ClusterName != "" && clone.EndTimestamp == "" { + if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" { if !serviceNameRegex.MatchString(clone.ClusterName) { return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q", serviceNameRegexString) diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index 28e9e8ca4..bf6875a82 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -163,7 +163,7 @@ var unmarshalCluster = []struct { "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, - marshal: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`), err: nil}, { about: "example with /status subresource", @@ -184,7 +184,7 @@ var unmarshalCluster = []struct { "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, { about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName", @@ -327,7 +327,7 @@ var unmarshalCluster = []struct { EndTime: mustParseTime("05:15"), }, }, - Clone: CloneDescription{ + Clone: &CloneDescription{ ClusterName: "acid-batman", }, ClusterName: "testcluster1", @@ -351,7 +351,7 @@ var unmarshalCluster = []struct { Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, Error: errors.New("name must match {TEAM}-{NAME} format").Error(), }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} 
,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), err: nil}, { about: "example with clone", @@ -366,7 +366,7 @@ var unmarshalCluster = []struct { }, Spec: PostgresSpec{ TeamID: "acid", - Clone: CloneDescription{ + Clone: &CloneDescription{ ClusterName: "team-batman", }, ClusterName: "testcluster1", @@ -405,7 +405,7 @@ var unmarshalCluster = []struct { err: errors.New("unexpected end of JSON input")}, { about: "expect error on JSON with field's value malformatted", - in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`), out: Postgresql{}, marshal: []byte{}, err: errors.New("invalid character 'q' looking for beginning of value"), diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index aaae1f04b..4bcbd2f5e 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -27,6 +27,7 @@ SOFTWARE. package v1 import ( + config "github.com/zalando/postgres-operator/pkg/util/config" corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -47,6 +48,28 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { + *out = *in + if in.TargetContainers != nil { + in, out := &in.TargetContainers, &out.TargetContainers + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume. +func (in *AdditionalVolume) DeepCopy() *AdditionalVolume { + if in == nil { + return nil + } + out := new(AdditionalVolume) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloneDescription) DeepCopyInto(out *CloneDescription) { *out = *in @@ -68,9 +91,72 @@ func (in *CloneDescription) DeepCopy() *CloneDescription { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) { + *out = *in + if in.NumberOfInstances != nil { + in, out := &in.NumberOfInstances, &out.NumberOfInstances + *out = new(int32) + **out = **in + } + if in.MaxDBConnections != nil { + in, out := &in.MaxDBConnections, &out.MaxDBConnections + *out = new(int32) + **out = **in + } + out.Resources = in.Resources + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPooler. +func (in *ConnectionPooler) DeepCopy() *ConnectionPooler { + if in == nil { + return nil + } + out := new(ConnectionPooler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolerConfiguration) DeepCopyInto(out *ConnectionPoolerConfiguration) { + *out = *in + if in.NumberOfInstances != nil { + in, out := &in.NumberOfInstances, &out.NumberOfInstances + *out = new(int32) + **out = **in + } + if in.MaxDBConnections != nil { + in, out := &in.MaxDBConnections, &out.MaxDBConnections + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfiguration. +func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfiguration { + if in == nil { + return nil + } + out := new(ConnectionPoolerConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { *out = *in + if in.SpiloRunAsUser != nil { + in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser + *out = new(int64) + **out = **in + } + if in.SpiloRunAsGroup != nil { + in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup + *out = new(int64) + **out = **in + } if in.SpiloFSGroup != nil { in, out := &in.SpiloFSGroup, &out.SpiloFSGroup *out = new(int64) @@ -93,6 +179,17 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura } out.OAuthTokenSecretName = in.OAuthTokenSecretName out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName + if in.InfrastructureRolesDefs != nil { + in, out := &in.InfrastructureRolesDefs, &out.InfrastructureRolesDefs + *out = make([]*config.InfrastructureRole, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(config.InfrastructureRole) + **out = **in + } + } + } if in.ClusterLabels != nil { in, out := &in.ClusterLabels, &out.ClusterLabels *out = make(map[string]string, len(*in)) @@ -105,6 +202,16 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura *out = make([]string, len(*in)) copy(*out, *in) } + if in.InheritedAnnotations != nil { + in, out := &in.InheritedAnnotations, &out.InheritedAnnotations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DownscalerAnnotations != nil { + in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.NodeReadinessLabel != nil { in, out := &in.NodeReadinessLabel, &out.NodeReadinessLabel *out = make(map[string]string, len(*in)) @@ -126,6 +233,7 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura (*out)[key] = val } } + out.PodEnvironmentConfigMap = in.PodEnvironmentConfigMap return } @@ -236,13 +344,20 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData *out = new(bool) **out = **in } - if in.Sidecars != nil { - in, out := &in.Sidecars, &out.Sidecars + if in.SidecarImages != nil { + in, out := &in.SidecarImages, &out.SidecarImages *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } + if in.SidecarContainers != nil { + in, out := &in.SidecarContainers, &out.SidecarContainers + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } out.PostgresUsersConfiguration = in.PostgresUsersConfiguration in.Kubernetes.DeepCopyInto(&out.Kubernetes) out.PostgresPodResources = in.PostgresPodResources @@ -254,6 +369,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData out.LoggingRESTAPI = in.LoggingRESTAPI out.Scalyr = in.Scalyr out.LogicalBackup = in.LogicalBackup + in.ConnectionPooler.DeepCopyInto(&out.ConnectionPooler) return } @@ -413,9 +529,34 @@ func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { *out = *in in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam) - out.Volume = in.Volume + in.Volume.DeepCopyInto(&out.Volume) in.Patroni.DeepCopyInto(&out.Patroni) out.Resources = in.Resources + if in.EnableConnectionPooler != nil { + in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler + *out = new(bool) + **out = **in + } + if in.EnableReplicaConnectionPooler != nil { + in, out := &in.EnableReplicaConnectionPooler, &out.EnableReplicaConnectionPooler + *out = new(bool) + 
**out = **in + } + if in.ConnectionPooler != nil { + in, out := &in.ConnectionPooler, &out.ConnectionPooler + *out = new(ConnectionPooler) + (*in).DeepCopyInto(*out) + } + if in.SpiloRunAsUser != nil { + in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser + *out = new(int64) + **out = **in + } + if in.SpiloRunAsGroup != nil { + in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup + *out = new(int64) + **out = **in + } if in.SpiloFSGroup != nil { in, out := &in.SpiloFSGroup, &out.SpiloFSGroup *out = new(int64) @@ -468,7 +609,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Clone.DeepCopyInto(&out.Clone) + if in.Clone != nil { + in, out := &in.Clone, &out.Clone + *out = new(CloneDescription) + (*in).DeepCopyInto(*out) + } if in.Databases != nil { in, out := &in.Databases, &out.Databases *out = make(map[string]string, len(*in)) @@ -476,6 +621,23 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = val } } + if in.PreparedDatabases != nil { + in, out := &in.PreparedDatabases, &out.PreparedDatabases + *out = make(map[string]PreparedDatabase, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations *out = make([]corev1.Toleration, len(*in)) @@ -521,6 +683,18 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = val } } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSDescription) + **out = **in + } + if in.AdditionalVolumes != nil { + in, out := &in.AdditionalVolumes, &out.AdditionalVolumes + *out = make([]AdditionalVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.InitContainersOld != nil { in, out := &in.InitContainersOld, &out.InitContainersOld *out = make([]corev1.Container, len(*in)) @@ -557,6 +731,127 @@ func (in *PostgresStatus) DeepCopy() *PostgresStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeam) DeepCopyInto(out *PostgresTeam) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeam. +func (in *PostgresTeam) DeepCopy() *PostgresTeam { + if in == nil { + return nil + } + out := new(PostgresTeam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeam) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresTeamList) DeepCopyInto(out *PostgresTeamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresTeam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamList. +func (in *PostgresTeamList) DeepCopy() *PostgresTeamList { + if in == nil { + return nil + } + out := new(PostgresTeamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) { + *out = *in + if in.AdditionalSuperuserTeams != nil { + in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalTeams != nil { + in, out := &in.AdditionalTeams, &out.AdditionalTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalMembers != nil { + in, out := &in.AdditionalMembers, &out.AdditionalMembers + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamSpec. +func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec { + if in == nil { + return nil + } + out := new(PostgresTeamSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { *out = *in @@ -657,6 +952,57 @@ func (in *PostgresqlParam) DeepCopy() *PostgresqlParam { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreparedDatabase) DeepCopyInto(out *PreparedDatabase) { + *out = *in + if in.PreparedSchemas != nil { + in, out := &in.PreparedSchemas, &out.PreparedSchemas + *out = make(map[string]PreparedSchema, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedDatabase. 
+func (in *PreparedDatabase) DeepCopy() *PreparedDatabase { + if in == nil { + return nil + } + out := new(PreparedDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreparedSchema) DeepCopyInto(out *PreparedSchema) { + *out = *in + if in.DefaultRoles != nil { + in, out := &in.DefaultRoles, &out.DefaultRoles + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreparedSchema. +func (in *PreparedSchema) DeepCopy() *PreparedSchema { + if in == nil { + return nil + } + out := new(PreparedSchema) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) { *out = *in @@ -752,6 +1098,22 @@ func (in *StandbyDescription) DeepCopy() *StandbyDescription { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSDescription) DeepCopyInto(out *TLSDescription) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSDescription. +func (in *TLSDescription) DeepCopy() *TLSDescription { + if in == nil { + return nil + } + out := new(TLSDescription) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { *out = *in @@ -808,6 +1170,16 @@ func (in UserFlags) DeepCopy() UserFlags { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(int64) + **out = **in + } + if in.Throughput != nil { + in, out := &in.Throughput, &out.Throughput + *out = new(int64) + **out = **in + } return } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 87ff123a1..a2effc623 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -3,25 +3,21 @@ package cluster // Postgres CustomResourceDefinition object i.e. 
Spilo import ( + "context" "database/sql" - "encoding/json" "fmt" "reflect" "regexp" + "strings" "sync" "time" "github.com/sirupsen/logrus" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - policybeta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + + "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" + pgteams "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -29,7 +25,17 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + "github.com/zalando/postgres-operator/pkg/util/volumes" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" ) var ( @@ -43,6 +49,7 @@ var ( type Config struct { OpConfig config.Config RestConfig *rest.Config + PgTeamMap pgteams.PostgresTeamMap InfrastructureRoles map[string]spec.PgUser // inherited from the controller PodServiceAccount *v1.ServiceAccount PodServiceAccountRole *rbacv1.Role @@ -65,6 +72,7 @@ type Cluster struct { acidv1.Postgresql Config logger *logrus.Entry + eventRecorder record.EventRecorder patroni patroni.Interface pgUsers map[string]spec.PgUser systemUsers map[string]spec.PgUser @@ -73,7 +81,7 @@ type Cluster struct { pgDb *sql.DB mu sync.Mutex userSyncStrategy spec.UserSyncer - deleteOptions *metav1.DeleteOptions + deleteOptions metav1.DeleteOptions podEventsQueue *cache.FIFO teamsAPIClient teams.Interface @@ -82,7 +90,9 @@ type Cluster struct { currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex - + ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects + EBSVolumes map[string]volumes.VolumeProperties + VolumeResizer volumes.VolumeResizer } type compareStatefulsetResult struct { @@ -93,7 +103,7 @@ type compareStatefulsetResult struct { } // New creates a new cluster. This function should be called from a controller. 
-func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry) *Cluster { +func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry, eventRecorder record.EventRecorder) *Cluster { deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { @@ -104,6 +114,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil }) + passwordEncryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"] + if !ok { + passwordEncryption = "md5" + } cluster := &Cluster{ Config: cfg, @@ -115,8 +129,8 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres Secrets: make(map[types.UID]*v1.Secret), Services: make(map[PostgresRole]*v1.Service), Endpoints: make(map[PostgresRole]*v1.Endpoints)}, - userSyncStrategy: users.DefaultUserSyncStrategy{}, - deleteOptions: &metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}, + userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: passwordEncryption}, + deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}, podEventsQueue: podEventsQueue, KubeClient: kubeClient, } @@ -124,6 +138,13 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger) cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName) cluster.patroni = patroni.New(cluster.logger) + cluster.eventRecorder = eventRecorder + + cluster.EBSVolumes = make(map[string]volumes.VolumeProperties) + if cfg.OpConfig.StorageResizeMode != "pvc" || cfg.OpConfig.EnableEBSGp3Migration { + cluster.VolumeResizer = &volumes.EBSVolumeResizer{AWSRegion: cfg.OpConfig.AWSRegion} + + } return cluster } @@ -150,31 +171,14 @@ func (c *Cluster) setProcessName(procName string, args ...interface{}) { } } -// SetStatus of Postgres cluster -// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above -func (c *Cluster) setStatus(status string) { - var pgStatus acidv1.PostgresStatus - pgStatus.PostgresClusterStatus = status - - patch, err := json.Marshal(struct { - PgStatus interface{} `json:"status"` - }{&pgStatus}) - +// GetReference of Postgres CR object +// i.e. required to emit events to this resource +func (c *Cluster) GetReference() *v1.ObjectReference { + ref, err := reference.GetReference(scheme.Scheme, &c.Postgresql) if err != nil { - c.logger.Errorf("could not marshal status: %v", err) + c.logger.Errorf("could not get reference for Postgresql CR %v/%v: %v", c.Postgresql.Namespace, c.Postgresql.Name, err) } - - // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), - // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11) - // we should take advantage of it. - newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch, "status") - if err != nil { - c.logger.Errorf("could not update status: %v", err) - // return as newspec is empty, see PR654 - return - } - // update the spec, maintaining the new resourceVersion. 
- c.setSpec(newspec) + return ref } func (c *Cluster) isNewCluster() bool { @@ -185,7 +189,8 @@ func (c *Cluster) initUsers() error { c.setProcessName("initializing users") - // clear our the previous state of the cluster users (in case we are running a sync). + // clear out the previous state of the cluster users (in case we are + // running a sync). c.systemUsers = map[string]spec.PgUser{} c.pgUsers = map[string]spec.PgUser{} @@ -195,6 +200,10 @@ return fmt.Errorf("could not init infrastructure roles: %v", err) } + if err := c.initPreparedDatabaseRoles(); err != nil { + return fmt.Errorf("could not init default users: %v", err) + } + if err := c.initRobotUsers(); err != nil { return fmt.Errorf("could not init robot users: %v", err) } @@ -220,13 +229,14 @@ defer func() { if err == nil { - c.setStatus(acidv1.ClusterStatusRunning) //TODO: are you sure it's running? + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running? } else { - c.setStatus(acidv1.ClusterStatusAddFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed) } }() - c.setStatus(acidv1.ClusterStatusCreating) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating) + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources") if err = c.enforceMinResourceLimits(&c.Spec); err != nil { return fmt.Errorf("could not enforce minimum resource limits: %v", err) @@ -239,12 +249,13 @@ } if role == Master { // replica endpoint will be created by the replica service. Master endpoint needs to be created by us, - // since the corresponding master service doesn't define any selectors. + // since the corresponding master service does not define any selectors.
ep, err = c.createEndpoint(role) if err != nil { return fmt.Errorf("could not create %s endpoint: %v", role, err) } c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta)) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Endpoints", "Endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta)) } if c.Services[role] != nil { @@ -255,6 +266,7 @@ func (c *Cluster) Create() error { return fmt.Errorf("could not create %s service: %v", role, err) } c.logger.Infof("%s service %q has been successfully created", role, util.NameFromMeta(service.ObjectMeta)) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Services", "The service %q for role %s has been successfully created", util.NameFromMeta(service.ObjectMeta), role) } if err = c.initUsers(); err != nil { @@ -266,6 +278,7 @@ func (c *Cluster) Create() error { return fmt.Errorf("could not create secrets: %v", err) } c.logger.Infof("secrets have been successfully created") + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created") if c.PodDisruptionBudget != nil { return fmt.Errorf("pod disruption budget already exists in the cluster") @@ -284,6 +297,7 @@ func (c *Cluster) Create() error { return fmt.Errorf("could not create statefulset: %v", err) } c.logger.Infof("statefulset %q has been successfully created", util.NameFromMeta(ss.ObjectMeta)) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Statefulset %q has been successfully created", util.NameFromMeta(ss.ObjectMeta)) c.logger.Info("waiting for the cluster being ready") @@ -292,9 +306,12 @@ func (c *Cluster) Create() error { return err } c.logger.Infof("pods are ready") + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready") - // create database objects unless we are running without pods or disabled that feature explicitly + // create database objects unless we are running without pods or disabled + // that feature explicitly if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { + c.logger.Infof("Create roles") if err = c.createRoles(); err != nil { return fmt.Errorf("could not create users: %v", err) } @@ -303,6 +320,9 @@ func (c *Cluster) Create() error { if err = c.syncDatabases(); err != nil { return fmt.Errorf("could not sync databases: %v", err) } + if err = c.syncPreparedDatabases(); err != nil { + return fmt.Errorf("could not sync prepared databases: %v", err) + } c.logger.Infof("databases have been successfully created") } @@ -317,6 +337,14 @@ func (c *Cluster) Create() error { c.logger.Errorf("could not list resources: %v", err) } + // Create connection pooler deployment and services if necessary. Since we + // need to perform some operations with the database itself (e.g. install + // lookup function), do it as the last step, when everything is available. 
+ // + // Do not consider connection pooler as a strict requirement, and if + // something fails, report warning + c.createConnectionPooler(c.installLookupFunction) + return nil } @@ -328,11 +356,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa //TODO: improve me if *c.Statefulset.Spec.Replicas != *statefulSet.Spec.Replicas { match = false - reasons = append(reasons, "new statefulset's number of replicas doesn't match the current one") + reasons = append(reasons, "new statefulset's number of replicas does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) { match = false - reasons = append(reasons, "new statefulset's annotations doesn't match the current one") + reasons = append(reasons, "new statefulset's annotations does not match the current one") } needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons) @@ -349,24 +377,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's serviceAccountName service account name doesn't match the current one") + reasons = append(reasons, "new statefulset's serviceAccountName service account name does not match the current one") } if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one") + reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod affinity doesn't match the current one") + reasons = append(reasons, "new statefulset's pod affinity does not match the current one") } // Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's metadata labels doesn't match the current one") + reasons = append(reasons, "new statefulset's metadata labels does not match the current one") } if (c.Statefulset.Spec.Selector != nil) && (statefulSet.Spec.Selector != nil) { if !reflect.DeepEqual(c.Statefulset.Spec.Selector.MatchLabels, statefulSet.Spec.Selector.MatchLabels) { @@ -377,7 +405,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa return &compareStatefulsetResult{} } needsReplace = true - reasons = append(reasons, "new statefulset's selector doesn't match the current one") + reasons = append(reasons, "new statefulset's selector does not match the current one") } } @@ -385,7 +413,13 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod template metadata annotations doesn't match the current one") + reasons = append(reasons, "new 
statefulset's pod template metadata annotations does not match the current one") + } + if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) { + match = false + needsReplace = true + needsRollUpdate = true + reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one") } if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) { needsReplace = true @@ -396,20 +430,36 @@ // Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta if name != statefulSet.Spec.VolumeClaimTemplates[i].Name { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d doesn't match the current one", i)) + reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) continue } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q doesn't match the current one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name)) } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q doesn't match the current one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name)) } } + // we assume any change in priority happens by rolling out a new priority class + // changing the priority value in an existing class is not supported + if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName { + match = false + needsReplace = true + needsRollUpdate = true + reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one") + } + + // lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image + // until they are re-created for other reasons, for example node rotation + if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) { + needsReplace = true + reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one") + } + if needsRollUpdate || needsReplace { match = false } @@ -439,18 +489,23 @@ } checks := []containerCheck{ - newCheck("new statefulset %s's %s (index %d) name doesn't match the current one", + newCheck("new statefulset %s's %s (index %d) name does not match the current one", func(a, b v1.Container) bool { return a.Name != b.Name }), - newCheck("new statefulset %s's %s (index %d) image doesn't match the current one", - func(a, b v1.Container) bool { return a.Image != b.Image }), - newCheck("new statefulset %s's %s (index %d) ports don't match the current one", + newCheck("new statefulset %s's %s (index %d)
ports do not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }), - newCheck("new statefulset %s's %s (index %d) resources don't match the current ones", + newCheck("new statefulset %s's %s (index %d) resources do not match the current ones", func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }), - newCheck("new statefulset %s's %s (index %d) environment doesn't match the current one", + newCheck("new statefulset %s's %s (index %d) environment does not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }), - newCheck("new statefulset %s's %s (index %d) environment sources don't match the current one", + newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }), + newCheck("new statefulset %s's %s (index %d) security context does not match the current one", + func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }), + } + + if !c.OpConfig.EnableLazySpiloUpgrade { + checks = append(checks, newCheck("new statefulset %s's %s (index %d) image does not match the current one", + func(a, b v1.Container) bool { return a.Image != b.Image })) } for index, containerA := range setA { @@ -514,7 +569,8 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err) } if isSmaller { - c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) + c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) spec.Resources.ResourceLimits.CPU = minCPULimit } } @@ -526,7 +582,8 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err) } if isSmaller { - c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) + c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) spec.Resources.ResourceLimits.Memory = minMemoryLimit } } @@ -540,38 +597,53 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { // for a cluster that had no such job before. In this case a missing job is not an error. 
func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed := false + syncStatetfulSet := false c.mu.Lock() defer c.mu.Unlock() - c.setStatus(acidv1.ClusterStatusUpdating) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) c.setSpec(newSpec) defer func() { if updateFailed { - c.setStatus(acidv1.ClusterStatusUpdateFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed) } else { - c.setStatus(acidv1.ClusterStatusRunning) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) } }() - if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison - c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion) - //we need that hack to generate statefulset with the old version - newSpec.Spec.PgVersion = oldSpec.Spec.PgVersion + logNiceDiff(c.logger, oldSpec, newSpec) + + if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion { + c.logger.Warningf("postgresql version change(%q -> %q) has no effect", + oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect", + oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) + // we need that hack to generate statefulset with the old version + newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion + } else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion { + c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync", + oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) + syncStatetfulSet = true } // Service if !reflect.DeepEqual(c.generateService(Master, &oldSpec.Spec), c.generateService(Master, &newSpec.Spec)) || !reflect.DeepEqual(c.generateService(Replica, &oldSpec.Spec), c.generateService(Replica, &newSpec.Spec)) { - c.logger.Debugf("syncing services") if err := c.syncServices(); err != nil { c.logger.Errorf("could not sync services: %v", err) updateFailed = true } } - if !reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) { + // connection pooler needs one system user created, which is done in + // initUsers. Check if it needs to be called. + sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) && + reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) + needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) || + needReplicaConnectionPoolerWorker(&newSpec.Spec) + if !sameUsers || needConnectionPooler { c.logger.Debugf("syncing secrets") if err := c.initUsers(); err != nil { c.logger.Errorf("could not init users: %v", err) @@ -588,14 +660,10 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // Volume - if oldSpec.Spec.Size != newSpec.Spec.Size { - c.logger.Debugf("syncing persistent volumes") - c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume) - - if err := c.syncVolumes(); err != nil { - c.logger.Errorf("could not sync persistent volumes: %v", err) - updateFailed = true - } + if c.OpConfig.StorageResizeMode != "off" { + c.syncVolumes() + } else { + c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). 
Skipping volume sync.") } // Statefulset @@ -622,9 +690,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed = true return } - - if !reflect.DeepEqual(oldSs, newSs) { + if syncStatetfulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) { c.logger.Debugf("syncing statefulsets") + syncStatetfulSet = false // TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet if err := c.syncStatefulSet(); err != nil { c.logger.Errorf("could not sync statefulsets: %v", err) @@ -686,18 +754,51 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { c.logger.Errorf("could not sync roles: %v", err) updateFailed = true } - if !reflect.DeepEqual(oldSpec.Spec.Databases, newSpec.Spec.Databases) { + if !reflect.DeepEqual(oldSpec.Spec.Databases, newSpec.Spec.Databases) || + !reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) { c.logger.Infof("syncing databases") if err := c.syncDatabases(); err != nil { c.logger.Errorf("could not sync databases: %v", err) updateFailed = true } } + if !reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases) { + c.logger.Infof("syncing prepared databases") + if err := c.syncPreparedDatabases(); err != nil { + c.logger.Errorf("could not sync prepared databases: %v", err) + updateFailed = true + } + } + } + + // Sync connection pooler. Before actually doing sync reset lookup + // installation flag, since manifest updates could add another db which we + // need to process. In the future we may want to do this more careful and + // check which databases we need to process, but even repeating the whole + // installation process should be good enough. + + if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + updateFailed = true } return nil } +func syncResources(a, b *v1.ResourceRequirements) bool { + for _, res := range []v1.ResourceName{ + v1.ResourceCPU, + v1.ResourceMemory, + } { + if !a.Limits[res].Equal(b.Limits[res]) || + !a.Requests[res].Equal(b.Requests[res]) { + return true + } + } + + return false +} + // Delete deletes the cluster and cleans up all objects associated with it (including statefulsets). // The deletion order here is somewhat significant, because Patroni, when running with the Kubernetes // DCS, reuses the master's endpoint to store the leader related metadata. 
If we remove the endpoint @@ -706,6 +807,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { func (c *Cluster) Delete() { c.mu.Lock() defer c.mu.Unlock() + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of new cluster resources") // delete the backup job before the stateful set of the cluster to prevent connections to non-existing pods // deleting the cron job also removes pods and batch jobs it created @@ -717,14 +819,8 @@ func (c *Cluster) Delete() { c.logger.Warningf("could not delete statefulset: %v", err) } - for _, obj := range c.Secrets { - if doDelete, user := c.shouldDeleteSecret(obj); !doDelete { - c.logger.Warningf("not removing secret %q for the system user %q", obj.GetName(), user) - continue - } - if err := c.deleteSecret(obj); err != nil { - c.logger.Warningf("could not delete secret: %v", err) - } + if err := c.deleteSecrets(); err != nil { + c.logger.Warningf("could not delete secrets: %v", err) } if err := c.deletePodDisruptionBudget(); err != nil { @@ -733,8 +829,10 @@ func (c *Cluster) Delete() { for _, role := range []PostgresRole{Master, Replica} { - if err := c.deleteEndpoint(role); err != nil { - c.logger.Warningf("could not delete %s endpoint: %v", role, err) + if !c.patroniKubernetesUseConfigMaps() { + if err := c.deleteEndpoint(role); err != nil { + c.logger.Warningf("could not delete %s endpoint: %v", role, err) + } } if err := c.deleteService(role); err != nil { @@ -746,6 +844,15 @@ func (c *Cluster) Delete() { c.logger.Warningf("could not remove leftover patroni objects; %v", err) } + // Delete connection pooler objects anyway, even if it's not mentioned in the + // manifest, just to not keep orphaned components in case if something went + // wrong + for _, role := range [2]PostgresRole{Master, Replica} { + if err := c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + } //NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status). @@ -812,6 +919,151 @@ func (c *Cluster) initSystemUsers() { Name: c.OpConfig.ReplicationUsername, Password: util.RandomPassword(constants.PasswordLength), } + + // Connection pooler user is an exception, if requested it's going to be + // created by operator as a normal pgUser + if needConnectionPooler(&c.Spec) { + // initialize empty connection pooler if not done yet + if c.Spec.ConnectionPooler == nil { + c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + // Using superuser as pooler user is not a good idea. First of all it's + // not going to be synced correctly with the current implementation, + // and second it's a bad practice. 
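	// Worked example of the user selection implemented just below (all names are
	// illustrative, not taken from a real manifest): with the operator-configured
	// pooler user "pooler", superuser "postgres" and protected roles {"admin"}:
	//   manifest user "pooler_app" -> "pooler_app" (the manifest value wins)
	//   manifest user "postgres"   -> "pooler"     (the superuser is rejected)
	//   manifest user "admin"      -> "pooler"     (a protected role is rejected)
	//   manifest user ""           -> "pooler"     (Coalesce falls back to the config value)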
+ username := c.OpConfig.ConnectionPooler.User + + isSuperUser := c.Spec.ConnectionPooler.User == c.OpConfig.SuperUsername + isProtectedUser := c.shouldAvoidProtectedOrSystemRole( + c.Spec.ConnectionPooler.User, "connection pool role") + + if !isSuperUser && !isProtectedUser { + username = util.Coalesce( + c.Spec.ConnectionPooler.User, + c.OpConfig.ConnectionPooler.User) + } + + // connection pooler application should be able to login with this role + connectionPoolerUser := spec.PgUser{ + Origin: spec.RoleConnectionPooler, + Name: username, + Flags: []string{constants.RoleFlagLogin}, + Password: util.RandomPassword(constants.PasswordLength), + } + + if _, exists := c.pgUsers[username]; !exists { + c.pgUsers[username] = connectionPoolerUser + } + + if _, exists := c.systemUsers[constants.ConnectionPoolerUserKeyName]; !exists { + c.systemUsers[constants.ConnectionPoolerUserKeyName] = connectionPoolerUser + } + } +} + +func (c *Cluster) initPreparedDatabaseRoles() error { + + if c.Spec.PreparedDatabases != nil && len(c.Spec.PreparedDatabases) == 0 { // TODO: add option to disable creating such a default DB + c.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{strings.Replace(c.Name, "-", "_", -1): {}} + } + + // create maps with default roles/users as keys and their membership as values + defaultRoles := map[string]string{ + constants.OwnerRoleNameSuffix: "", + constants.ReaderRoleNameSuffix: "", + constants.WriterRoleNameSuffix: constants.ReaderRoleNameSuffix, + } + defaultUsers := map[string]string{ + constants.OwnerRoleNameSuffix + constants.UserRoleNameSuffix: constants.OwnerRoleNameSuffix, + constants.ReaderRoleNameSuffix + constants.UserRoleNameSuffix: constants.ReaderRoleNameSuffix, + constants.WriterRoleNameSuffix + constants.UserRoleNameSuffix: constants.WriterRoleNameSuffix, + } + + for preparedDbName, preparedDB := range c.Spec.PreparedDatabases { + // get list of prepared schemas to set in search_path + preparedSchemas := preparedDB.PreparedSchemas + if len(preparedDB.PreparedSchemas) == 0 { + preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} + } + + var searchPath strings.Builder + searchPath.WriteString(constants.DefaultSearchPath) + for preparedSchemaName := range preparedSchemas { + searchPath.WriteString(", " + preparedSchemaName) + } + + // default roles per database + if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil { + return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) + } + if preparedDB.DefaultUsers { + if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil { + return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) + } + } + + // default roles per database schema + for preparedSchemaName, preparedSchema := range preparedSchemas { + if preparedSchema.DefaultRoles == nil || *preparedSchema.DefaultRoles { + if err := c.initDefaultRoles(defaultRoles, + preparedDbName+constants.OwnerRoleNameSuffix, + preparedDbName+"_"+preparedSchemaName, + constants.DefaultSearchPath+", "+preparedSchemaName); err != nil { + return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err) + } + if preparedSchema.DefaultUsers { + if err := c.initDefaultRoles(defaultUsers, + preparedDbName+constants.OwnerRoleNameSuffix, + preparedDbName+"_"+preparedSchemaName, + constants.DefaultSearchPath+", "+preparedSchemaName); err != 
nil { + return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err) + } + } + } + } + } + return nil +} + +func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error { + + for defaultRole, inherits := range defaultRoles { + + roleName := prefix + defaultRole + + flags := []string{constants.RoleFlagNoLogin} + if defaultRole[len(defaultRole)-5:] == constants.UserRoleNameSuffix { + flags = []string{constants.RoleFlagLogin} + } + + memberOf := make([]string, 0) + if inherits != "" { + memberOf = append(memberOf, prefix+inherits) + } + + adminRole := "" + if strings.Contains(defaultRole, constants.OwnerRoleNameSuffix) { + adminRole = admin + } else { + adminRole = prefix + constants.OwnerRoleNameSuffix + } + + newRole := spec.PgUser{ + Origin: spec.RoleOriginBootstrap, + Name: roleName, + Password: util.RandomPassword(constants.PasswordLength), + Flags: flags, + MemberOf: memberOf, + Parameters: map[string]string{"search_path": searchPath}, + AdminRole: adminRole, + } + if currentRole, present := c.pgUsers[roleName]; present { + c.pgUsers[roleName] = c.resolveNameConflict(¤tRole, &newRole) + } else { + c.pgUsers[roleName] = newRole + } + } + return nil } func (c *Cluster) initRobotUsers() error { @@ -861,7 +1113,7 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e if c.shouldAvoidProtectedOrSystemRole(username, "API role") { continue } - if c.OpConfig.EnableTeamSuperuser || isPostgresSuperuserTeam { + if (c.OpConfig.EnableTeamSuperuser && teamID == c.Spec.TeamID) || isPostgresSuperuserTeam { flags = append(flags, constants.RoleFlagSuperuser) } else { if c.OpConfig.TeamAdminRole != "" { @@ -890,17 +1142,38 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e func (c *Cluster) initHumanUsers() error { var clusterIsOwnedBySuperuserTeam bool + superuserTeams := []string{} + + if c.OpConfig.EnablePostgresTeamCRDSuperusers { + superuserTeams = c.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true) + } for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams { - err := c.initTeamMembers(postgresSuperuserTeam, true) - if err != nil { - return fmt.Errorf("Cannot create a team %q of Postgres superusers: %v", postgresSuperuserTeam, err) + if !(util.SliceContains(superuserTeams, postgresSuperuserTeam)) { + superuserTeams = append(superuserTeams, postgresSuperuserTeam) } - if postgresSuperuserTeam == c.Spec.TeamID { + } + + for _, superuserTeam := range superuserTeams { + err := c.initTeamMembers(superuserTeam, true) + if err != nil { + return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err) + } + if superuserTeam == c.Spec.TeamID { clusterIsOwnedBySuperuserTeam = true } } + additionalTeams := c.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true) + for _, additionalTeam := range additionalTeams { + if !(util.SliceContains(superuserTeams, additionalTeam)) { + err := c.initTeamMembers(additionalTeam, false) + if err != nil { + return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err) + } + } + } + if clusterIsOwnedBySuperuserTeam { c.logger.Infof("Team %q owning the cluster is also a team of superusers. 
Created superuser roles for its members instead of admin roles.", c.Spec.TeamID) return nil @@ -908,7 +1181,7 @@ func (c *Cluster) initHumanUsers() error { err := c.initTeamMembers(c.Spec.TeamID, false) if err != nil { - return fmt.Errorf("Cannot create a team %q of admins owning the PG cluster: %v", c.Spec.TeamID, err) + return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err) } return nil @@ -996,6 +1269,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e var err error c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate) var wg sync.WaitGroup @@ -1022,6 +1296,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil { c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate) if err = <-podLabelErr; err != nil { err = fmt.Errorf("could not get master pod label: %v", err) } @@ -1037,6 +1312,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e // close the label waiting channel no sooner than the waiting goroutine terminates. close(podLabelErr) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err) return err } @@ -1051,11 +1327,6 @@ func (c *Cluster) Unlock() { c.mu.Unlock() } -func (c *Cluster) shouldDeleteSecret(secret *v1.Secret) (delete bool, userName string) { - secretUser := string(secret.Data["username"]) - return (secretUser != c.OpConfig.ReplicationUsername && secretUser != c.OpConfig.SuperUsername), secretUser -} - type simpleActionWithResult func() error type clusterObjectGet func(name string) (spec.NamespacedName, error) @@ -1064,11 +1335,19 @@ type clusterObjectDelete func(name string) error func (c *Cluster) deletePatroniClusterObjects() error { // TODO: figure out how to remove leftover patroni objects in other cases + var actionsList []simpleActionWithResult + if !c.patroniUsesKubernetes() { c.logger.Infof("not cleaning up Etcd Patroni objects on cluster delete") } - c.logger.Debugf("removing leftover Patroni objects (endpoints, services and configmaps)") - for _, deleter := range []simpleActionWithResult{c.deletePatroniClusterEndpoints, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps} { + + if !c.patroniKubernetesUseConfigMaps() { + actionsList = append(actionsList, c.deletePatroniClusterEndpoints) + } + actionsList = append(actionsList, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps) + + c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)") + for _, deleter := range actionsList { if err := deleter(); err != nil { return err } @@ -1088,7 +1367,7 @@ func (c *Cluster) deleteClusterObject( objType, namespacedName) if err = del(name); err != nil { - return fmt.Errorf("could not Patroni delete cluster object %q with name %q: %v", + return fmt.Errorf("could not delete Patroni cluster object %q with name %q: %v", objType, namespacedName, err) } @@ -1102,12 +1381,12 @@ func (c *Cluster) deleteClusterObject( func (c *Cluster) deletePatroniClusterServices() error { get := 
func(name string) (spec.NamespacedName, error) { - svc, err := c.KubeClient.Services(c.Namespace).Get(name, metav1.GetOptions{}) + svc, err := c.KubeClient.Services(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) return util.NameFromMeta(svc.ObjectMeta), err } deleteServiceFn := func(name string) error { - return c.KubeClient.Services(c.Namespace).Delete(name, c.deleteOptions) + return c.KubeClient.Services(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) } return c.deleteClusterObject(get, deleteServiceFn, "service") @@ -1115,12 +1394,12 @@ func (c *Cluster) deletePatroniClusterServices() error { func (c *Cluster) deletePatroniClusterEndpoints() error { get := func(name string) (spec.NamespacedName, error) { - ep, err := c.KubeClient.Endpoints(c.Namespace).Get(name, metav1.GetOptions{}) + ep, err := c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) return util.NameFromMeta(ep.ObjectMeta), err } deleteEndpointFn := func(name string) error { - return c.KubeClient.Endpoints(c.Namespace).Delete(name, c.deleteOptions) + return c.KubeClient.Endpoints(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) } return c.deleteClusterObject(get, deleteEndpointFn, "endpoint") @@ -1128,12 +1407,12 @@ func (c *Cluster) deletePatroniClusterEndpoints() error { func (c *Cluster) deletePatroniClusterConfigMaps() error { get := func(name string) (spec.NamespacedName, error) { - cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(name, metav1.GetOptions{}) + cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) return util.NameFromMeta(cm.ObjectMeta), err } deleteConfigMapFn := func(name string) error { - return c.KubeClient.ConfigMaps(c.Namespace).Delete(name, c.deleteOptions) + return c.KubeClient.ConfigMaps(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) } return c.deleteClusterObject(get, deleteConfigMapFn, "configmap") diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 9efbc51c6..1f6510e65 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -9,9 +9,11 @@ import ( acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/teams" - v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" ) const ( @@ -20,21 +22,53 @@ const ( ) var logger = logrus.New().WithField("test", "cluster") +var eventRecorder = record.NewFakeRecorder(1) + var cl = New( Config{ OpConfig: config.Config{ - ProtectedRoles: []string{"admin"}, + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, Auth: config.Auth{ SuperUsername: superUserName, ReplicationUsername: replicationUserName, }, + Resources: config.Resources{ + DownscalerAnnotations: []string{"downscaler/*"}, + }, }, }, k8sutil.NewMockKubernetesClient(), - acidv1.Postgresql{}, + acidv1.Postgresql{ObjectMeta: metav1.ObjectMeta{Name: "acid-test", Namespace: "test", Annotations: map[string]string{"downscaler/downtime_replicas": "0"}}}, logger, + eventRecorder, ) +func TestStatefulSetAnnotations(t *testing.T) { + testName := "CheckStatefulsetAnnotations" + spec := acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: 
acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + } + ss, err := cl.generateStatefulSet(&spec) + if err != nil { + t.Errorf("in %s no statefulset created %v", testName, err) + } + if ss != nil { + annotation := ss.ObjectMeta.GetAnnotations() + if _, ok := annotation["downscaler/downtime_replicas"]; !ok { + t.Errorf("in %s respective annotation not found on sts", testName) + } + } + +} + func TestInitRobotUsers(t *testing.T) { testName := "TestInitRobotUsers" tests := []struct { @@ -299,36 +333,6 @@ func TestInitHumanUsersWithSuperuserTeams(t *testing.T) { } } -func TestShouldDeleteSecret(t *testing.T) { - testName := "TestShouldDeleteSecret" - - tests := []struct { - secret *v1.Secret - outcome bool - }{ - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte("foobar")}}, - outcome: true, - }, - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte(superUserName)}}, - - outcome: false, - }, - { - secret: &v1.Secret{Data: map[string][]byte{"username": []byte(replicationUserName)}}, - outcome: false, - }, - } - - for _, tt := range tests { - if outcome, username := cl.shouldDeleteSecret(tt.secret); outcome != tt.outcome { - t.Errorf("%s expects the check for deletion of the username %q secret to return %t, got %t", - testName, username, tt.outcome, outcome) - } - } -} - func TestPodAnnotations(t *testing.T) { testName := "TestPodAnnotations" tests := []struct { @@ -704,3 +708,140 @@ func TestServiceAnnotations(t *testing.T) { }) } } + +func TestInitSystemUsers(t *testing.T) { + testName := "Test system users initialization" + + // default cluster without connection pooler + cl.initSystemUsers() + if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist { + t.Errorf("%s, connection pooler user is present", testName) + } + + // cluster with connection pooler + cl.Spec.EnableConnectionPooler = boolToPointer(true) + cl.initSystemUsers() + if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist { + t.Errorf("%s, connection pooler user is not present", testName) + } + + // superuser is not allowed as connection pool user + cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ + User: "postgres", + } + cl.OpConfig.SuperUsername = "postgres" + cl.OpConfig.ConnectionPooler.User = "pooler" + + cl.initSystemUsers() + if _, exist := cl.pgUsers["pooler"]; !exist { + t.Errorf("%s, Superuser is not allowed to be a connection pool user", testName) + } + + // neither protected users are + delete(cl.pgUsers, "pooler") + cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ + User: "admin", + } + cl.OpConfig.ProtectedRoles = []string{"admin"} + + cl.initSystemUsers() + if _, exist := cl.pgUsers["pooler"]; !exist { + t.Errorf("%s, Protected user are not allowed to be a connection pool user", testName) + } + + delete(cl.pgUsers, "pooler") + cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ + User: "standby", + } + + cl.initSystemUsers() + if _, exist := cl.pgUsers["pooler"]; !exist { + t.Errorf("%s, System users are not allowed to be a connection pool user", testName) + } +} + +func TestPreparedDatabases(t *testing.T) { + testName := "TestDefaultPreparedDatabase" + + cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{} + cl.initPreparedDatabaseRoles() + + for _, role := range []string{"acid_test_owner", "acid_test_reader", "acid_test_writer", + "acid_test_data_owner", "acid_test_data_reader", 
"acid_test_data_writer"} { + if _, exist := cl.pgUsers[role]; !exist { + t.Errorf("%s, default role %q for prepared database not present", testName, role) + } + } + + testName = "TestPreparedDatabaseWithSchema" + + cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{ + "foo": { + DefaultUsers: true, + PreparedSchemas: map[string]acidv1.PreparedSchema{ + "bar": { + DefaultUsers: true, + }, + }, + }, + } + cl.initPreparedDatabaseRoles() + + for _, role := range []string{ + "foo_owner", "foo_reader", "foo_writer", + "foo_owner_user", "foo_reader_user", "foo_writer_user", + "foo_bar_owner", "foo_bar_reader", "foo_bar_writer", + "foo_bar_owner_user", "foo_bar_reader_user", "foo_bar_writer_user"} { + if _, exist := cl.pgUsers[role]; !exist { + t.Errorf("%s, default role %q for prepared database not present", testName, role) + } + } + + roleTests := []struct { + subTest string + role string + memberOf string + admin string + }{ + { + subTest: "Test admin role of owner", + role: "foo_owner", + memberOf: "", + admin: "admin", + }, + { + subTest: "Test writer is a member of reader", + role: "foo_writer", + memberOf: "foo_reader", + admin: "foo_owner", + }, + { + subTest: "Test reader LOGIN role", + role: "foo_reader_user", + memberOf: "foo_reader", + admin: "foo_owner", + }, + { + subTest: "Test schema owner", + role: "foo_bar_owner", + memberOf: "", + admin: "foo_owner", + }, + { + subTest: "Test schema writer LOGIN role", + role: "foo_bar_writer_user", + memberOf: "foo_bar_writer", + admin: "foo_bar_owner", + }, + } + + for _, tt := range roleTests { + user := cl.pgUsers[tt.role] + if (tt.memberOf == "" && len(user.MemberOf) > 0) || (tt.memberOf != "" && user.MemberOf[0] != tt.memberOf) { + t.Errorf("%s, incorrect membership for default role %q. Expected %q, got %q", tt.subTest, tt.role, tt.memberOf, user.MemberOf[0]) + } + if user.AdminRole != tt.admin { + t.Errorf("%s, incorrect admin role for default role %q. Expected %q, got %q", tt.subTest, tt.role, tt.admin, user.AdminRole) + } + } +} diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go new file mode 100644 index 000000000..db4f1f56d --- /dev/null +++ b/pkg/cluster/connection_pooler.go @@ -0,0 +1,939 @@ +package cluster + +import ( + "context" + "fmt" + "strings" + + "github.com/r3labs/diff" + "github.com/sirupsen/logrus" + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" +) + +// ConnectionPoolerObjects K8s objects that are belong to connection pooler +type ConnectionPoolerObjects struct { + Deployment *appsv1.Deployment + Service *v1.Service + Name string + ClusterName string + Namespace string + Role PostgresRole + // It could happen that a connection pooler was enabled, but the operator + // was not able to properly process a corresponding event or was restarted. + // In this case we will miss missing/require situation and a lookup function + // will not be installed. 
To avoid synchronizing it all the time to prevent + // this, we can remember the result in memory at least until the next + // restart. + LookupFunction bool + // Careful with referencing cluster.spec this object pointer changes + // during runtime and lifetime of cluster +} + +func (c *Cluster) connectionPoolerName(role PostgresRole) string { + name := c.Name + "-pooler" + if role == Replica { + name = name + "-repl" + } + return name +} + +// isConnectionPoolerEnabled +func needConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) || + needReplicaConnectionPoolerWorker(spec) +} + +func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needMasterConnectionPoolerWorker(spec) +} + +func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || + (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) +} + +func needReplicaConnectionPooler(spec *acidv1.PostgresSpec) bool { + return needReplicaConnectionPoolerWorker(spec) +} + +func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { + return spec.EnableReplicaConnectionPooler != nil && + *spec.EnableReplicaConnectionPooler +} + +// Return connection pooler labels selector, which should from one point of view +// inherit most of the labels from the cluster itself, but at the same time +// have e.g. different `application` label, so that recreatePod operation will +// not interfere with it (it lists all the pods via labels, and if there would +// be no difference, it will recreate also pooler pods). +func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector { + poolerLabels := c.labelsSet(addExtraLabels) + + // TODO should be config values + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = c.connectionPoolerName(role) + + if addExtraLabels { + extraLabels := map[string]string{} + extraLabels[c.OpConfig.PodRoleLabel] = string(role) + + poolerLabels = labels.Merge(poolerLabels, extraLabels) + } + + return &metav1.LabelSelector{ + MatchLabels: poolerLabels, + MatchExpressions: nil, + } +} + +// Prepare the database for connection pooler to be used, i.e. install lookup +// function (do it first, because it should be fast and if it didn't succeed, +// it doesn't makes sense to create more K8S objects. At this moment we assume +// that necessary connection pooler user exists. +// +// After that create all the objects for connection pooler, namely a deployment +// with a chosen pooler and a service to expose it. + +// have connectionpooler name in the cp object to have it immutable name +// add these cp related functions to a new cp file +// opConfig, cluster, and database name +func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncReason, error) { + var reason SyncReason + c.setProcessName("creating connection pooler") + + //this is essentially sync with nil as oldSpec + if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, LookupFunction); err != nil { + return reason, err + } + return reason, nil +} + +// +// Generate pool size related environment variables. +// +// MAX_DB_CONN would specify the global maximum for connections to a target +// database. +// +// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough. 
+// +// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when +// most of the queries coming through a connection pooler are from the same +// user to the same db). In case if we want to spin up more connection pooler +// instances, take this into account and maintain the same number of +// connections. +// +// MIN_SIZE is a pool's minimal size, to prevent situation when sudden workload +// have to wait for spinning up a new connections. +// +// RESERVE_SIZE is how many additional connections to allow for a pooler. +func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar { + spec := &c.Spec + effectiveMode := util.Coalesce( + spec.ConnectionPooler.Mode, + c.OpConfig.ConnectionPooler.Mode) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + effectiveMaxDBConn := util.CoalesceInt32( + spec.ConnectionPooler.MaxDBConnections, + c.OpConfig.ConnectionPooler.MaxDBConnections) + + if effectiveMaxDBConn == nil { + effectiveMaxDBConn = k8sutil.Int32ToPointer( + constants.ConnectionPoolerMaxDBConnections) + } + + maxDBConn := *effectiveMaxDBConn / *numberOfInstances + + defaultSize := maxDBConn / 2 + minSize := defaultSize / 2 + reserveSize := minSize + + return []v1.EnvVar{ + { + Name: "CONNECTION_POOLER_PORT", + Value: fmt.Sprint(pgPort), + }, + { + Name: "CONNECTION_POOLER_MODE", + Value: effectiveMode, + }, + { + Name: "CONNECTION_POOLER_DEFAULT_SIZE", + Value: fmt.Sprint(defaultSize), + }, + { + Name: "CONNECTION_POOLER_MIN_SIZE", + Value: fmt.Sprint(minSize), + }, + { + Name: "CONNECTION_POOLER_RESERVE_SIZE", + Value: fmt.Sprint(reserveSize), + }, + { + Name: "CONNECTION_POOLER_MAX_CLIENT_CONN", + Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections), + }, + { + Name: "CONNECTION_POOLER_MAX_DB_CONN", + Value: fmt.Sprint(maxDBConn), + }, + } +} + +func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( + *v1.PodTemplateSpec, error) { + spec := &c.Spec + gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) + resources, err := generateResourceRequirements( + spec.ConnectionPooler.Resources, + makeDefaultConnectionPoolerResources(&c.OpConfig)) + + effectiveDockerImage := util.Coalesce( + spec.ConnectionPooler.DockerImage, + c.OpConfig.ConnectionPooler.Image) + + effectiveSchema := util.Coalesce( + spec.ConnectionPooler.Schema, + c.OpConfig.ConnectionPooler.Schema) + + if err != nil { + return nil, fmt.Errorf("could not generate resource requirements: %v", err) + } + + secretSelector := func(key string) *v1.SecretKeySelector { + effectiveUser := util.Coalesce( + spec.ConnectionPooler.User, + c.OpConfig.ConnectionPooler.User) + + return &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.credentialSecretName(effectiveUser), + }, + Key: key, + } + } + + envVars := []v1.EnvVar{ + { + Name: "PGHOST", + Value: c.serviceAddress(role), + }, + { + Name: "PGPORT", + Value: c.servicePort(role), + }, + { + Name: "PGUSER", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("username"), + }, + }, + // the convention is to use the same schema name as + // connection pooler username + { + Name: "PGSCHEMA", + Value: effectiveSchema, + }, + { + Name: "PGPASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: secretSelector("password"), + }, + }, + } + envVars = append(envVars, c.getConnectionPoolerEnvVars()...) 
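	// Worked example of the sizing behind the pool-size env vars appended above
	// (numbers are illustrative, assuming a global maximum of 60 database
	// connections split across 2 pooler instances; the real values come from the
	// manifest or the operator configuration):
	//   maxDBConn   = 60 / 2 = 30  // per-instance ceiling on server connections
	//   defaultSize = 30 / 2 = 15  // pool size per db/user pair
	//   minSize     = 15 / 2 = 7   // integer division; the pool keeps at least this many
	//   reserveSize = 7            // extra connections allowed on top of defaultSize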
+ + poolerContainer := v1.Container{ + Name: connectionPoolerContainer, + Image: effectiveDockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + Ports: []v1.ContainerPort{ + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + }, + Env: envVars, + ReadinessProbe: &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.IntOrString{IntVal: pgPort}, + }, + }, + }, + SecurityContext: &v1.SecurityContext{ + AllowPrivilegeEscalation: util.False(), + }, + } + + podTemplate := &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: c.connectionPoolerLabels(role, true).MatchLabels, + Namespace: c.Namespace, + Annotations: c.annotationsSet(c.generatePodAnnotations(spec)), + }, + Spec: v1.PodSpec{ + TerminationGracePeriodSeconds: &gracePeriod, + Containers: []v1.Container{poolerContainer}, + // TODO: add tolerations to scheduler pooler on the same node + // as database + //Tolerations: *tolerationsSpec, + }, + } + + return podTemplate, nil +} + +func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *ConnectionPoolerObjects) ( + *appsv1.Deployment, error) { + spec := &c.Spec + + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role) + + numberOfInstances := spec.ConnectionPooler.NumberOfInstances + if numberOfInstances == nil { + numberOfInstances = util.CoalesceInt32( + c.OpConfig.ConnectionPooler.NumberOfInstances, + k8sutil.Int32ToPointer(1)) + } + + if *numberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "Adjusted number of connection pooler instances from %d to %d" + c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances) + + *numberOfInstances = constants.ConnectionPoolerMinInstances + } + + if err != nil { + return nil, err + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: connectionPooler.Name, + Namespace: connectionPooler.Namespace, + Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels, + Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this deployment, but there is a hope that this object + // will be garbage collected if something went wrong and operator + // didn't deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: numberOfInstances, + Selector: c.connectionPoolerLabels(connectionPooler.Role, false), + Template: *podTemplate, + }, + } + + return deployment, nil +} + +func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service { + + spec := &c.Spec + // there are two ways to enable connection pooler, either to specify a + // connectionPooler section or enableConnectionPooler. 
In the second case + // spec.connectionPooler will be nil, so to make it easier to calculate + // default values, initialize it to an empty structure. It could be done + // anywhere, but here is the earliest common entry point between sync and + // create code, so init here. + if spec.ConnectionPooler == nil { + spec.ConnectionPooler = &acidv1.ConnectionPooler{} + } + + serviceSpec := v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: connectionPooler.Name, + Port: pgPort, + TargetPort: intstr.IntOrString{StrVal: c.servicePort(connectionPooler.Role)}, + }, + }, + Type: v1.ServiceTypeClusterIP, + Selector: map[string]string{ + "connection-pooler": c.connectionPoolerName(connectionPooler.Role), + }, + } + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: connectionPooler.Name, + Namespace: connectionPooler.Namespace, + Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels, + Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)), + // make StatefulSet object its owner to represent the dependency. + // By itself StatefulSet is being deleted with "Orphaned" + // propagation policy, which means that it's deletion will not + // clean up this service, but there is a hope that this object will + // be garbage collected if something went wrong and operator didn't + // deleted it. + OwnerReferences: c.ownerReferences(), + }, + Spec: serviceSpec, + } + + return service +} + +//delete connection pooler +func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { + c.logger.Infof("deleting connection pooler spilo-role=%s", role) + + // Lack of connection pooler objects is not a fatal error, just log it if + // it was present before in the manifest + if c.ConnectionPooler[role] == nil || role == "" { + c.logger.Debugf("no connection pooler to delete") + return nil + } + + // Clean up the deployment object. If deployment resource we've remembered + // is somehow empty, try to delete based on what would we generate + var deployment *appsv1.Deployment + deployment = c.ConnectionPooler[role].Deployment + + policy := metav1.DeletePropagationForeground + options := metav1.DeleteOptions{PropagationPolicy: &policy} + + if deployment != nil { + + // set delete propagation policy to foreground, so that replica set will be + // also deleted. + + err = c.KubeClient. + Deployments(c.Namespace). + Delete(context.TODO(), deployment.Name, options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("connection pooler deployment was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete connection pooler deployment: %v", err) + } + + c.logger.Infof("connection pooler deployment %s has been deleted for role %s", deployment.Name, role) + } + + // Repeat the same for the service object + var service *v1.Service + service = c.ConnectionPooler[role].Service + if service == nil { + c.logger.Debugf("no connection pooler service object to delete") + } else { + + err = c.KubeClient. + Services(c.Namespace). 
+ Delete(context.TODO(), service.Name, options) + + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("connection pooler service was already deleted") + } else if err != nil { + return fmt.Errorf("could not delete connection pooler service: %v", err) + } + + c.logger.Infof("connection pooler service %s has been deleted for role %s", service.Name, role) + } + + c.ConnectionPooler[role].Deployment = nil + c.ConnectionPooler[role].Service = nil + return nil +} + +//delete connection pooler +func (c *Cluster) deleteConnectionPoolerSecret() (err error) { + // Repeat the same for the secret object + secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User) + + secret, err := c.KubeClient. + Secrets(c.Namespace). + Get(context.TODO(), secretName, metav1.GetOptions{}) + + if err != nil { + c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err) + } else { + if err = c.deleteSecret(secret.UID, *secret); err != nil { + return fmt.Errorf("could not delete pooler secret: %v", err) + } + } + return nil +} + +// Perform actual patching of a connection pooler deployment, assuming that all +// the check were already done before. +func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { + if newDeployment == nil { + return nil, fmt.Errorf("there is no connection pooler in the cluster") + } + + patchData, err := specPatch(newDeployment.Spec) + if err != nil { + return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) + } + + // An update probably requires RetryOnConflict, but since only one operator + // worker at one time will try to update it chances of conflicts are + // minimal. + deployment, err := KubeClient. + Deployments(newDeployment.Namespace).Patch( + context.TODO(), + newDeployment.Name, + types.MergePatchType, + patchData, + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch connection pooler deployment: %v", err) + } + + return deployment, nil +} + +//updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment +func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { + patchData, err := metaAnnotationsPatch(annotations) + if err != nil { + return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err) + } + result, err := KubeClient.Deployments(deployment.Namespace).Patch( + context.TODO(), + deployment.Name, + types.MergePatchType, + []byte(patchData), + metav1.PatchOptions{}, + "") + if err != nil { + return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err) + } + return result, nil + +} + +// Test if two connection pooler configuration needs to be synced. For simplicity +// compare not the actual K8S objects, but the configuration itself and request +// sync if there is any difference. 
+func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, logger *logrus.Entry) (sync bool, reasons []string) { + reasons = []string{} + sync = false + + changelog, err := diff.Diff(oldSpec, newSpec) + if err != nil { + logger.Infof("cannot get diff, do not do anything, %+v", err) + return false, reasons + } + + if len(changelog) > 0 { + sync = true + } + + for _, change := range changelog { + msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'", + change.Type, change.Path, change.From, change.To) + reasons = append(reasons, msg) + } + + return sync, reasons +} + +// Check if we need to synchronize connection pooler deployment due to new +// defaults, that are different from what we see in the DeploymentSpec +func needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) { + + reasons = []string{} + sync = false + + config := Config.OpConfig.ConnectionPooler + podTemplate := deployment.Spec.Template + poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer] + + if spec == nil { + spec = &acidv1.ConnectionPooler{} + } + if spec.NumberOfInstances == nil && + *deployment.Spec.Replicas != *config.NumberOfInstances { + + sync = true + msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)", + *deployment.Spec.Replicas, *config.NumberOfInstances) + reasons = append(reasons, msg) + } + + if spec.DockerImage == "" && + poolerContainer.Image != config.Image { + + sync = true + msg := fmt.Sprintf("DockerImage is different (having %s, required %s)", + poolerContainer.Image, config.Image) + reasons = append(reasons, msg) + } + + expectedResources, err := generateResourceRequirements(spec.Resources, + makeDefaultConnectionPoolerResources(&Config.OpConfig)) + + // An error to generate expected resources means something is not quite + // right, but for the purpose of robustness do not panic here, just report + // and ignore resources comparison (in the worst case there will be no + // updates for new resource values). 
+ if err == nil && syncResources(&poolerContainer.Resources, expectedResources) { + sync = true + msg := fmt.Sprintf("Resources are different (having %+v, required %+v)", + poolerContainer.Resources, expectedResources) + reasons = append(reasons, msg) + } + + if err != nil { + return false, reasons + } + + for _, env := range poolerContainer.Env { + if spec.User == "" && env.Name == "PGUSER" { + ref := env.ValueFrom.SecretKeyRef.LocalObjectReference + secretName := Config.OpConfig.SecretNameTemplate.Format( + "username", strings.Replace(config.User, "_", "-", -1), + "cluster", deployment.ClusterName, + "tprkind", acidv1.PostgresCRDResourceKind, + "tprgroup", acidzalando.GroupName) + + if ref.Name != secretName { + sync = true + msg := fmt.Sprintf("pooler user is different (having %s, required %s)", + ref.Name, config.User) + reasons = append(reasons, msg) + } + } + + if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema { + sync = true + msg := fmt.Sprintf("pooler schema is different (having %s, required %s)", + env.Value, config.Schema) + reasons = append(reasons, msg) + } + } + + return sync, reasons +} + +// Generate default resource section for connection pooler deployment, to be +// used if nothing custom is specified in the manifest +func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources { + + defaultRequests := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + } + + return acidv1.Resources{ + ResourceRequests: defaultRequests, + ResourceLimits: defaultLimits, + } +} + +func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) { + var v []string + + var input []*bool + if oldSpec == nil { + input = []*bool{nil, nil, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} + } else { + input = []*bool{oldSpec.Spec.EnableConnectionPooler, oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} + } + + for _, b := range input { + if b == nil { + v = append(v, "nil") + } else { + v = append(v, fmt.Sprintf("%v", *b)) + } + } + + log.Debugf("syncing connection pooler from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3]) +} + +func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) { + + var reason SyncReason + var err error + var newNeedConnectionPooler, oldNeedConnectionPooler bool + oldNeedConnectionPooler = false + + if oldSpec == nil { + oldSpec = &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + } + } + + needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger) + masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler) + if err != nil { + c.logger.Error("Error in getting diff of master connection pooler changes") + } + replicaChanges, err := diff.Diff(oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler) + if err != nil { + c.logger.Error("Error in getting diff of replica connection pooler changes") + } + + // skip pooler sync only + // 1. 
if there is no diff in spec, AND + // 2. if connection pooler is already there and is also required as per newSpec + // + // Handling the case when connectionPooler is not there but it is required + // as per spec, hence do not skip syncing in that case, even though there + // is no diff in specs + if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) && + (c.ConnectionPooler != nil && (needConnectionPooler(&newSpec.Spec))) { + c.logger.Debugln("syncing pooler is not required") + return nil, nil + } + + logPoolerEssentials(c.logger, oldSpec, newSpec) + + // Check and perform the sync requirements for each of the roles. + for _, role := range [2]PostgresRole{Master, Replica} { + + if role == Master { + newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec) + if oldSpec != nil { + oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec) + } + } else { + newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec) + if oldSpec != nil { + oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec) + } + } + + // if the call is via createConnectionPooler, then it is required to initialize + // the structure + if c.ConnectionPooler == nil { + c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} + } + if c.ConnectionPooler[role] == nil { + c.ConnectionPooler[role] = &ConnectionPoolerObjects{ + Deployment: nil, + Service: nil, + Name: c.connectionPoolerName(role), + ClusterName: c.ClusterName, + Namespace: c.Namespace, + LookupFunction: false, + Role: role, + } + } + + if newNeedConnectionPooler { + // Try to sync in any case. If we didn't needed connection pooler before, + // it means we want to create it. If it was already present, still sync + // since it could happen that there is no difference in specs, and all + // the resources are remembered, but the deployment was manually deleted + // in between + + // in this case also do not forget to install lookup function as for + // creating cluster + if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { + newConnectionPooler := newSpec.Spec.ConnectionPooler + + specSchema := "" + specUser := "" + + if newConnectionPooler != nil { + specSchema = newConnectionPooler.Schema + specUser = newConnectionPooler.User + } + + schema := util.Coalesce( + specSchema, + c.OpConfig.ConnectionPooler.Schema) + + user := util.Coalesce( + specUser, + c.OpConfig.ConnectionPooler.User) + + if err = LookupFunction(schema, user, role); err != nil { + return NoSync, err + } + } + + if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { + c.logger.Errorf("could not sync connection pooler: %v", err) + return reason, err + } + } else { + // delete and cleanup resources if they are already detected + if c.ConnectionPooler[role] != nil && + (c.ConnectionPooler[role].Deployment != nil || + c.ConnectionPooler[role].Service != nil) { + + if err = c.deleteConnectionPooler(role); err != nil { + c.logger.Warningf("could not remove connection pooler: %v", err) + } + } + } + } + if !needMasterConnectionPoolerWorker(&newSpec.Spec) && + !needReplicaConnectionPoolerWorker(&newSpec.Spec) { + if err = c.deleteConnectionPoolerSecret(); err != nil { + c.logger.Warningf("could not remove connection pooler secret: %v", err) + } + } + + return reason, nil +} + +// Synchronize connection pooler resources. Effectively we're interested only in +// synchronizing the corresponding deployment, but in case of deployment or +// service is missing, create it. 
After checking, also remember an object for +// the future references. +func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) ( + SyncReason, error) { + + deployment, err := c.KubeClient. + Deployments(c.Namespace). + Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "deployment %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + deploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) + if err != nil { + msg = "could not generate deployment for connection pooler: %v" + return NoSync, fmt.Errorf(msg, err) + } + + deployment, err := c.KubeClient. + Deployments(deploymentSpec.Namespace). + Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Deployment = deployment + } else if err != nil { + msg := "could not get connection pooler deployment to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + c.ConnectionPooler[role].Deployment = deployment + // actual synchronization + + var oldConnectionPooler *acidv1.ConnectionPooler + + if oldSpec != nil { + oldConnectionPooler = oldSpec.Spec.ConnectionPooler + } + + newConnectionPooler := newSpec.Spec.ConnectionPooler + // sync implementation below assumes that both old and new specs are + // not nil, but it can happen. To avoid any confusion like updating a + // deployment because the specification changed from nil to an empty + // struct (that was initialized somewhere before) replace any nil with + // an empty spec. + if oldConnectionPooler == nil { + oldConnectionPooler = &acidv1.ConnectionPooler{} + } + + if newConnectionPooler == nil { + newConnectionPooler = &acidv1.ConnectionPooler{} + } + + c.logger.Infof("old: %+v, new %+v", oldConnectionPooler, newConnectionPooler) + + var specSync bool + var specReason []string + + if oldSpec != nil { + specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger) + } + + defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) + reason := append(specReason, defaultsReason...) + + if specSync || defaultsSync { + c.logger.Infof("Update connection pooler deployment %s, reason: %+v", + c.connectionPoolerName(role), reason) + newDeploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) + if err != nil { + msg := "could not generate deployment for connection pooler: %v" + return reason, fmt.Errorf(msg, err) + } + + deployment, err := updateConnectionPoolerDeployment(c.KubeClient, + newDeploymentSpec) + + if err != nil { + return reason, err + } + c.ConnectionPooler[role].Deployment = deployment + } + } + + newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations)) + if newAnnotations != nil { + deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment + } + + service, err := c.KubeClient. + Services(c.Namespace). 
+ Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}) + + if err != nil && k8sutil.ResourceNotFound(err) { + msg := "Service %s for connection pooler synchronization is not found, create it" + c.logger.Warningf(msg, c.connectionPoolerName(role)) + + serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role]) + service, err := c.KubeClient. + Services(serviceSpec.Namespace). + Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) + + if err != nil { + return NoSync, err + } + c.ConnectionPooler[role].Service = service + + } else if err != nil { + msg := "could not get connection pooler service to sync: %v" + return NoSync, fmt.Errorf(msg, err) + } else { + // Service updates are not supported and probably not that useful anyway + c.ConnectionPooler[role].Service = service + } + + return NoSync, nil +} diff --git a/pkg/cluster/connection_pooler_new_test.go b/pkg/cluster/connection_pooler_new_test.go new file mode 100644 index 000000000..72b3408e3 --- /dev/null +++ b/pkg/cluster/connection_pooler_new_test.go @@ -0,0 +1,45 @@ +package cluster + +import ( + "testing" + + "context" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/labels" + + "k8s.io/client-go/kubernetes/fake" +) + +func TestFakeClient(t *testing.T) { + clientSet := fake.NewSimpleClientset() + namespace := "default" + + l := labels.Set(map[string]string{ + "application": "spilo", + }) + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment1", + Namespace: namespace, + Labels: l, + }, + } + + clientSet.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) + + deployment2, _ := clientSet.AppsV1().Deployments(namespace).Get(context.TODO(), "my-deployment1", metav1.GetOptions{}) + + if deployment.ObjectMeta.Name != deployment2.ObjectMeta.Name { + t.Errorf("Deployments are not equal") + } + + deployments, _ := clientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "application=spilo"}) + + if len(deployments.Items) != 1 { + t.Errorf("Label search does not work") + } +} diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go new file mode 100644 index 000000000..280adb101 --- /dev/null +++ b/pkg/cluster/connection_pooler_test.go @@ -0,0 +1,1008 @@ +package cluster + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { + return nil +} + +func boolToPointer(value bool) *bool { + return &value +} + +func int32ToPointer(value int32) *int32 { + return &value +} + +func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + + poolerLabels := cluster.labelsSet(false) + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) + + if 
cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil && + util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) && + (cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil || + *cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) { + return fmt.Errorf("Wrong number of instances") + } + } + return nil +} + +func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + for _, role := range []PostgresRole{Master, Replica} { + poolerLabels := cluster.labelsSet(false) + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) + + if cluster.ConnectionPooler[role].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s %s", role, cluster.ConnectionPooler[role].Deployment.Labels) + } + + if cluster.ConnectionPooler[role].Service == nil || !util.MapContains(cluster.ConnectionPooler[role].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s %s", role, cluster.ConnectionPooler[role].Service.Labels) + } + } + + return nil +} + +func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + poolerLabels := cluster.labelsSet(false) + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master) + + if cluster.ConnectionPooler[Master].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Master].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Deployment.Labels) + } + + if cluster.ConnectionPooler[Master].Service == nil || !util.MapContains(cluster.ConnectionPooler[Master].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Service.Labels) + } + + return nil +} + +func ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error { + if cluster.ConnectionPooler == nil { + return fmt.Errorf("Connection pooler resources are empty") + } + + poolerLabels := cluster.labelsSet(false) + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica) + + if cluster.ConnectionPooler[Replica].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Deployment.Labels, poolerLabels) { + return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Deployment.Labels) + } + + if cluster.ConnectionPooler[Replica].Service == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Service.Labels, poolerLabels) { + return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Service.Labels) + } + + return nil +} + +func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error { + for _, role := range [2]PostgresRole{Master, Replica} { + if cluster.ConnectionPooler[role] != nil && + (cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) { + return fmt.Errorf("Connection 
pooler was not deleted for role %v", role) + } + } + + return nil +} + +func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if cluster.ConnectionPooler[Master] != nil && + (cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) { + return fmt.Errorf("Connection pooler master was not deleted") + } + return nil +} + +func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error { + + if cluster.ConnectionPooler[Replica] != nil && + (cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) { + return fmt.Errorf("Connection pooler replica was not deleted") + } + return nil +} + +func noEmptySync(cluster *Cluster, err error, reason SyncReason) error { + for _, msg := range reason { + if strings.HasPrefix(msg, "update [] from '' to '") { + return fmt.Errorf("There is an empty reason, %s", msg) + } + } + + return nil +} + +func TestNeedConnectionPooler(t *testing.T) { + testName := "Test how connection pooler can be enabled" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is not enabled with flag and full", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: nil, + } + + if needMasterConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Connection pooler is enabled with flag false and nil", + testName) + } + + // Test for replica connection pooler + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with full definition", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + } + + if !needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with flag", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + ConnectionPooler: 
&acidv1.ConnectionPooler{}, + } + + if needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is still enabled with flag being false", + testName) + } + + cluster.Spec = acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + } + + if !needReplicaConnectionPooler(&cluster.Spec) { + t.Errorf("%s: Replica Connection pooler is not enabled with flag and full", + testName) + } +} + +func TestConnectionPoolerCreateDeletion(t *testing.T) { + + testName := "test connection pooler creation and deletion" + clientSet := fake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + namespace := "default" + + client := k8sutil.KubernetesClient{ + StatefulSetsGetter: clientSet.AppsV1(), + ServicesGetter: clientSet.CoreV1(), + DeploymentsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + SecretsGetter: clientSet.CoreV1(), + } + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acid-fake-cluster", + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + EnableReplicaConnectionPooler: boolToPointer(true), + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = "acid-fake-cluster" + cluster.Namespace = "default" + + _, err := cluster.createService(Master) + assert.NoError(t, err) + _, err = cluster.createStatefulSet() + assert.NoError(t, err) + + reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) + + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, reason) + } + for _, role := range [2]PostgresRole{Master, Replica} { + poolerLabels := cluster.labelsSet(false) + poolerLabels["application"] = "db-connection-pooler" + poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role) + + if cluster.ConnectionPooler[role] != nil { + if cluster.ConnectionPooler[role].Deployment == nil && util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) { + t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role) + } + + if cluster.ConnectionPooler[role].Service == nil && util.MapContains(cluster.ConnectionPooler[role].Service.Labels, poolerLabels) { + t.Errorf("%s: Connection pooler service is empty for role %s", testName, role) + } + } + } + + oldSpec := &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + EnableReplicaConnectionPooler: boolToPointer(true), + }, + } + newSpec := &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(false), + EnableReplicaConnectionPooler: boolToPointer(false), + }, + } + + // Delete connection pooler via sync + _, err = cluster.syncConnectionPooler(oldSpec, newSpec, 
mockInstallLookupFunction) + if err != nil { + t.Errorf("%s: Cannot sync connection pooler, %s", testName, err) + } + + for _, role := range [2]PostgresRole{Master, Replica} { + err = cluster.deleteConnectionPooler(role) + if err != nil { + t.Errorf("%s: Cannot delete connection pooler, %s", testName, err) + } + } +} + +func TestConnectionPoolerSync(t *testing.T) { + + testName := "test connection pooler synchronization" + clientSet := fake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + namespace := "default" + + client := k8sutil.KubernetesClient{ + StatefulSetsGetter: clientSet.AppsV1(), + ServicesGetter: clientSet.CoreV1(), + DeploymentsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + SecretsGetter: clientSet.CoreV1(), + } + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acid-fake-cluster", + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = "acid-fake-cluster" + cluster.Namespace = "default" + + _, err := cluster.createService(Master) + assert.NoError(t, err) + _, err = cluster.createStatefulSet() + assert.NoError(t, err) + + reason, err := cluster.createConnectionPooler(mockInstallLookupFunction) + + if err != nil { + t.Errorf("%s: Cannot create connection pooler, %s, %+v", + testName, err, reason) + } + + tests := []struct { + subTest string + oldSpec *acidv1.Postgresql + newSpec *acidv1.Postgresql + cluster *Cluster + defaultImage string + defaultInstances int32 + check func(cluster *Cluster, err error, reason SyncReason) error + }{ + { + subTest: "create from scratch", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterObjectsAreSaved, + }, + { + subTest: "create if doesn't exist", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterObjectsAreSaved, + }, + { + subTest: "create if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: MasterObjectsAreSaved, + }, + { + subTest: "create no replica with flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: 
acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(false), + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "create replica if doesn't exist with a flag", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: ReplicaObjectsAreSaved, + }, + { + subTest: "create both master and replica", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + EnableConnectionPooler: boolToPointer(true), + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreSaved, + }, + { + subTest: "delete only replica if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyReplicaDeleted, + }, + { + subTest: "delete only master if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableConnectionPooler: boolToPointer(true), + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: OnlyMasterDeleted, + }, + { + subTest: "delete if not needed", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "cleanup if still there", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{}, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: objectsAreDeleted, + }, + { + subTest: "update image from changed defaults", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: cluster, + defaultImage: "pooler:2.0", + defaultInstances: 2, + check: deploymentUpdated, + }, + { + subTest: "there is no sync from nil to an empty spec", + oldSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: nil, + }, + }, + newSpec: &acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + }, + cluster: cluster, + defaultImage: "pooler:1.0", + defaultInstances: 1, + check: noEmptySync, + }, + } + for _, tt := range tests { + tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage + 
tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances = + int32ToPointer(tt.defaultInstances) + + t.Logf("running test for %s [%s]", testName, tt.subTest) + + reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec, + tt.newSpec, mockInstallLookupFunction) + + if err := tt.check(tt.cluster, err, reason); err != nil { + t.Errorf("%s [%s]: Could not synchronize, %+v", + testName, tt.subTest, err) + } + } +} + +func TestConnectionPoolerPodSpec(t *testing.T) { + testName := "Test connection pooler pod template generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + MaxDBConnections: int32ToPointer(60), + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + var clusterNoDefaultRes = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{}, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + clusterNoDefaultRes.Spec = acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + } + + noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "no default resources", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), + cluster: clusterNoDefaultRes, + check: noCheck, + }, + { + subTest: "default resources are set", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testResources, + }, + { + subTest: "labels for service", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testLabels, + }, + { + subTest: "required envs", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + expected: nil, + cluster: cluster, + check: testEnvs, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) + + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", + testName, tt.subTest, 
err, tt.expected) + } + + err = tt.check(cluster, podSpec, role) + if err != nil { + t.Errorf("%s [%s]: Pod spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} + +func TestConnectionPoolerDeploymentSpec(t *testing.T) { + testName := "Test connection pooler deployment spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: true, + Name: "", + Role: Master, + }, + } + + noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + expected error + cluster *Cluster + check func(cluster *Cluster, deployment *appsv1.Deployment) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testDeploymentOwnerReference, + }, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + expected: nil, + cluster: cluster, + check: testSelector, + }, + } + for _, tt := range tests { + deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master]) + + if err != tt.expected && err.Error() != tt.expected.Error() { + t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v", + testName, tt.subTest, err, tt.expected) + } + + err = tt.check(cluster, deployment) + if err != nil { + t.Errorf("%s [%s]: Deployment spec is incorrect, %+v", + testName, tt.subTest, err) + } + } +} + +func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] + if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest { + return fmt.Errorf("CPU request does not match, got %s, expected %s", + cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest) + } + + memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"] + if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest { + return fmt.Errorf("Memory request does not match, got %s, expected %s", + memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest) + } + + cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] + if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit { + return fmt.Errorf("CPU limit does not match, got %s, expected %s", 
+ cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit) + } + + memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] + if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit { + return fmt.Errorf("Memory limit does not match, got %s, expected %s", + memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit) + } + + return nil +} + +func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"] + + if poolerLabels != cluster.connectionPoolerLabels(role, true).MatchLabels["connection-pooler"] { + return fmt.Errorf("Pod labels do not match, got %+v, expected %+v", + podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(role, true).MatchLabels) + } + + return nil +} + +func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error { + labels := deployment.Spec.Selector.MatchLabels + expected := cluster.connectionPoolerLabels(Master, true).MatchLabels + + if labels["connection-pooler"] != expected["connection-pooler"] { + return fmt.Errorf("Labels are incorrect, got %+v, expected %+v", + labels, expected) + } + + return nil +} + +func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error { + selector := service.Spec.Selector + + if selector["connection-pooler"] != cluster.connectionPoolerName(role) { + return fmt.Errorf("Selector is incorrect, got %s, expected %s", + selector["connection-pooler"], cluster.connectionPoolerName(role)) + } + + return nil +} + +func TestConnectionPoolerServiceSpec(t *testing.T) { + testName := "Test connection pooler service spec generation" + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Statefulset = &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sts", + }, + } + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{ + Master: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Role: Master, + }, + Replica: { + Deployment: nil, + Service: nil, + LookupFunction: false, + Role: Replica, + }, + } + + noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error { + return nil + } + + tests := []struct { + subTest string + spec *acidv1.PostgresSpec + cluster *Cluster + check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error + }{ + { + subTest: "default configuration", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: noCheck, + }, + { + subTest: "owner reference", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + }, + cluster: cluster, + check: testServiceOwnerReference, + }, + { + subTest: "selector", + spec: &acidv1.PostgresSpec{ + ConnectionPooler: &acidv1.ConnectionPooler{}, + EnableReplicaConnectionPooler: boolToPointer(true), + }, + cluster: cluster, + check: testServiceSelector, + }, + } + for _, role := range [2]PostgresRole{Master, Replica} { + for _, tt := range tests { + 
service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role]) + + if err := tt.check(cluster, service, role); err != nil { + t.Errorf("%s [%s]: Service spec is incorrect, %+v", + testName, tt.subTest, err) + } + } + } +} diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 07ea011a6..760b68d72 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -1,10 +1,12 @@ package cluster import ( + "bytes" "database/sql" "fmt" "net" "strings" + "text/template" "time" "github.com/lib/pq" @@ -25,16 +27,66 @@ const ( WHERE a.rolname = ANY($1) ORDER BY 1;` - getDatabasesSQL = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;` - createDatabaseSQL = `CREATE DATABASE "%s" OWNER "%s";` - alterDatabaseOwnerSQL = `ALTER DATABASE "%s" OWNER TO "%s";` + getDatabasesSQL = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;` + getSchemasSQL = `SELECT n.nspname AS dbschema FROM pg_catalog.pg_namespace n + WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema' ORDER BY 1` + getExtensionsSQL = `SELECT e.extname, n.nspname FROM pg_catalog.pg_extension e + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace ORDER BY 1;` + + createDatabaseSQL = `CREATE DATABASE "%s" OWNER "%s";` + createDatabaseSchemaSQL = `SET ROLE TO "%s"; CREATE SCHEMA IF NOT EXISTS "%s" AUTHORIZATION "%s"` + alterDatabaseOwnerSQL = `ALTER DATABASE "%s" OWNER TO "%s";` + createExtensionSQL = `CREATE EXTENSION IF NOT EXISTS "%s" SCHEMA "%s"` + alterExtensionSQL = `ALTER EXTENSION "%s" SET SCHEMA "%s"` + + globalDefaultPrivilegesSQL = `SET ROLE TO "%s"; + ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO "%s","%s"; + ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO "%s"; + ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO "%s"; + ALTER DEFAULT PRIVILEGES GRANT INSERT, UPDATE, DELETE ON TABLES TO "%s"; + ALTER DEFAULT PRIVILEGES GRANT USAGE, UPDATE ON SEQUENCES TO "%s"; + ALTER DEFAULT PRIVILEGES GRANT EXECUTE ON FUNCTIONS TO "%s","%s"; + ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO "%s","%s";` + schemaDefaultPrivilegesSQL = `SET ROLE TO "%s"; + GRANT USAGE ON SCHEMA "%s" TO "%s","%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON TABLES TO "%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT SELECT ON SEQUENCES TO "%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT INSERT, UPDATE, DELETE ON TABLES TO "%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE, UPDATE ON SEQUENCES TO "%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT EXECUTE ON FUNCTIONS TO "%s","%s"; + ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE ON TYPES TO "%s","%s";` + + connectionPoolerLookup = ` + CREATE SCHEMA IF NOT EXISTS {{.pooler_schema}}; + + CREATE OR REPLACE FUNCTION {{.pooler_schema}}.user_lookup( + in i_username text, out uname text, out phash text) + RETURNS record AS $$ + BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = i_username INTO uname, phash; + RETURN; + END; + $$ LANGUAGE plpgsql SECURITY DEFINER; + + REVOKE ALL ON FUNCTION {{.pooler_schema}}.user_lookup(text) + FROM public, {{.pooler_user}}; + GRANT EXECUTE ON FUNCTION {{.pooler_schema}}.user_lookup(text) + TO {{.pooler_user}}; + GRANT USAGE ON SCHEMA {{.pooler_schema}} TO {{.pooler_user}}; + ` ) -func (c *Cluster) pgConnectionString() string { +func (c *Cluster) pgConnectionString(dbname string) string { password := c.systemUsers[constants.SuperuserKeyName].Password - return fmt.Sprintf("host='%s' dbname=postgres sslmode=require user='%s' 
password='%s' connect_timeout='%d'", + if dbname == "" { + dbname = "postgres" + } + + return fmt.Sprintf("host='%s' dbname='%s' sslmode=require user='%s' password='%s' connect_timeout='%d'", fmt.Sprintf("%s.%s.svc.%s", c.Name, c.Namespace, c.OpConfig.ClusterDomain), + dbname, c.systemUsers[constants.SuperuserKeyName].Name, strings.Replace(password, "$", "\\$", -1), constants.PostgresConnectTimeout/time.Second) @@ -49,13 +101,22 @@ func (c *Cluster) databaseAccessDisabled() bool { } func (c *Cluster) initDbConn() error { - c.setProcessName("initializing db connection") if c.pgDb != nil { return nil } + return c.initDbConnWithName("") +} + +// Worker function for connection initialization. This function does not check +// if the connection is already open, if it is then it will be overwritten. +// Callers need to make sure no connection is open, otherwise we could leak +// connections +func (c *Cluster) initDbConnWithName(dbname string) error { + c.setProcessName("initializing db connection") + var conn *sql.DB - connstring := c.pgConnectionString() + connstring := c.pgConnectionString(dbname) finalerr := retryutil.Retry(constants.PostgresConnectTimeout, constants.PostgresConnectRetryTimeout, func() (bool, error) { @@ -70,12 +131,12 @@ func (c *Cluster) initDbConn() error { } if _, ok := err.(*net.OpError); ok { - c.logger.Errorf("could not connect to PostgreSQL database: %v", err) + c.logger.Warningf("could not connect to Postgres database: %v", err) return false, nil } if err2 := conn.Close(); err2 != nil { - c.logger.Errorf("error when closing PostgreSQL connection after another error: %v", err) + c.logger.Errorf("error when closing Postgres connection after another error: %v", err) return false, err2 } @@ -89,13 +150,23 @@ func (c *Cluster) initDbConn() error { conn.SetMaxOpenConns(1) conn.SetMaxIdleConns(-1) + if c.pgDb != nil { + msg := "closing an existing connection before opening a new one to %s" + c.logger.Warningf(msg, dbname) + c.closeDbConn() + } + c.pgDb = conn return nil } +func (c *Cluster) connectionIsClosed() bool { + return c.pgDb == nil +} + func (c *Cluster) closeDbConn() (err error) { - c.setProcessName("closing db connection") + c.setProcessName("closing database connection") if c.pgDb != nil { c.logger.Debug("closing database connection") if err = c.pgDb.Close(); err != nil { @@ -110,7 +181,7 @@ func (c *Cluster) closeDbConn() (err error) { } func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUserMap, err error) { - c.setProcessName("reading users from the db") + c.setProcessName("reading users from the database") var rows *sql.Rows users = make(spec.PgUserMap) if rows, err = c.pgDb.Query(getUserSQL, pq.Array(userNames)); err != nil { @@ -187,43 +258,141 @@ func (c *Cluster) getDatabases() (dbs map[string]string, err error) { } // executeCreateDatabase creates new database with the given owner. -// The caller is responsible for openinging and closing the database connection. -func (c *Cluster) executeCreateDatabase(datname, owner string) error { - return c.execCreateOrAlterDatabase(datname, owner, createDatabaseSQL, +// The caller is responsible for opening and closing the database connection. +func (c *Cluster) executeCreateDatabase(databaseName, owner string) error { + return c.execCreateOrAlterDatabase(databaseName, owner, createDatabaseSQL, "creating database", "create database") } -// executeCreateDatabase changes the owner of the given database. -// The caller is responsible for openinging and closing the database connection. 
-func (c *Cluster) executeAlterDatabaseOwner(datname string, owner string) error { - return c.execCreateOrAlterDatabase(datname, owner, alterDatabaseOwnerSQL, +// executeAlterDatabaseOwner changes the owner of the given database. +// The caller is responsible for opening and closing the database connection. +func (c *Cluster) executeAlterDatabaseOwner(databaseName string, owner string) error { + return c.execCreateOrAlterDatabase(databaseName, owner, alterDatabaseOwnerSQL, "changing owner for database", "alter database owner") } -func (c *Cluster) execCreateOrAlterDatabase(datname, owner, statement, doing, operation string) error { - if !c.databaseNameOwnerValid(datname, owner) { +func (c *Cluster) execCreateOrAlterDatabase(databaseName, owner, statement, doing, operation string) error { + if !c.databaseNameOwnerValid(databaseName, owner) { return nil } - c.logger.Infof("%s %q owner %q", doing, datname, owner) - if _, err := c.pgDb.Exec(fmt.Sprintf(statement, datname, owner)); err != nil { + c.logger.Infof("%s %q owner %q", doing, databaseName, owner) + if _, err := c.pgDb.Exec(fmt.Sprintf(statement, databaseName, owner)); err != nil { return fmt.Errorf("could not execute %s: %v", operation, err) } return nil } -func (c *Cluster) databaseNameOwnerValid(datname, owner string) bool { +func (c *Cluster) databaseNameOwnerValid(databaseName, owner string) bool { if _, ok := c.pgUsers[owner]; !ok { - c.logger.Infof("skipping creation of the %q database, user %q does not exist", datname, owner) + c.logger.Infof("skipping creation of the %q database, user %q does not exist", databaseName, owner) return false } - if !databaseNameRegexp.MatchString(datname) { - c.logger.Infof("database %q has invalid name", datname) + if !databaseNameRegexp.MatchString(databaseName) { + c.logger.Infof("database %q has invalid name", databaseName) return false } return true } +// getSchemas returns the list of current database schemas +// The caller is responsible for opening and closing the database connection +func (c *Cluster) getSchemas() (schemas []string, err error) { + var ( + rows *sql.Rows + dbschemas []string + ) + + if rows, err = c.pgDb.Query(getSchemasSQL); err != nil { + return nil, fmt.Errorf("could not query database schemas: %v", err) + } + + defer func() { + if err2 := rows.Close(); err2 != nil { + if err != nil { + err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err) + } else { + err = fmt.Errorf("error when closing query cursor: %v", err2) + } + } + }() + + for rows.Next() { + var dbschema string + + if err = rows.Scan(&dbschema); err != nil { + return nil, fmt.Errorf("error when processing row: %v", err) + } + dbschemas = append(dbschemas, dbschema) + } + + return dbschemas, err +} + +// executeCreateDatabaseSchema creates new database schema with the given owner. +// The caller is responsible for opening and closing the database connection. 
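+// Illustrative example (hypothetical names): for schemaName "data", dbOwner "app_owner"
+// and schemaOwner "app_data_owner" the createDatabaseSchemaSQL statement expands to roughly
+//   SET ROLE TO "app_owner"; CREATE SCHEMA IF NOT EXISTS "data" AUTHORIZATION "app_data_owner"
+// followed by the schema-level default privilege grants for the matching reader/writer roles.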
+func (c *Cluster) executeCreateDatabaseSchema(databaseName, schemaName, dbOwner string, schemaOwner string) error { + return c.execCreateDatabaseSchema(databaseName, schemaName, dbOwner, schemaOwner, createDatabaseSchemaSQL, + "creating database schema", "create database schema") +} + +func (c *Cluster) execCreateDatabaseSchema(databaseName, schemaName, dbOwner, schemaOwner, statement, doing, operation string) error { + if !c.databaseSchemaNameValid(schemaName) { + return nil + } + c.logger.Infof("%s %q owner %q", doing, schemaName, schemaOwner) + if _, err := c.pgDb.Exec(fmt.Sprintf(statement, dbOwner, schemaName, schemaOwner)); err != nil { + return fmt.Errorf("could not execute %s: %v", operation, err) + } + + // set default privileges for schema + c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName) + if schemaOwner != dbOwner { + c.execAlterSchemaDefaultPrivileges(schemaName, dbOwner, databaseName+"_"+schemaName) + c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName+"_"+schemaName) + } + + return nil +} + +func (c *Cluster) databaseSchemaNameValid(schemaName string) bool { + if !databaseNameRegexp.MatchString(schemaName) { + c.logger.Infof("database schema %q has invalid name", schemaName) + return false + } + return true +} + +func (c *Cluster) execAlterSchemaDefaultPrivileges(schemaName, owner, rolePrefix string) error { + if _, err := c.pgDb.Exec(fmt.Sprintf(schemaDefaultPrivilegesSQL, owner, + schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // schema + schemaName, rolePrefix+constants.ReaderRoleNameSuffix, // tables + schemaName, rolePrefix+constants.ReaderRoleNameSuffix, // sequences + schemaName, rolePrefix+constants.WriterRoleNameSuffix, // tables + schemaName, rolePrefix+constants.WriterRoleNameSuffix, // sequences + schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // types + schemaName, rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix)); err != nil { // functions + return fmt.Errorf("could not alter default privileges for database schema %s: %v", schemaName, err) + } + + return nil +} + +func (c *Cluster) execAlterGlobalDefaultPrivileges(owner, rolePrefix string) error { + if _, err := c.pgDb.Exec(fmt.Sprintf(globalDefaultPrivilegesSQL, owner, + rolePrefix+constants.WriterRoleNameSuffix, rolePrefix+constants.ReaderRoleNameSuffix, // schemas + rolePrefix+constants.ReaderRoleNameSuffix, // tables + rolePrefix+constants.ReaderRoleNameSuffix, // sequences + rolePrefix+constants.WriterRoleNameSuffix, // tables + rolePrefix+constants.WriterRoleNameSuffix, // sequences + rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix, // types + rolePrefix+constants.ReaderRoleNameSuffix, rolePrefix+constants.WriterRoleNameSuffix)); err != nil { // functions + return fmt.Errorf("could not alter default privileges for database %s: %v", rolePrefix, err) + } + + return nil +} + func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin bool) (result []string) { if rolsuper { result = append(result, constants.RoleFlagSuperuser) @@ -243,3 +412,167 @@ func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin return result } + +// getExtension returns the list of current database extensions +// The caller is responsible for opening and closing the database connection +func (c *Cluster) getExtensions() (dbExtensions map[string]string, err error) { + 
var ( + rows *sql.Rows + ) + + if rows, err = c.pgDb.Query(getExtensionsSQL); err != nil { + return nil, fmt.Errorf("could not query database extensions: %v", err) + } + + defer func() { + if err2 := rows.Close(); err2 != nil { + if err != nil { + err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err) + } else { + err = fmt.Errorf("error when closing query cursor: %v", err2) + } + } + }() + + dbExtensions = make(map[string]string) + + for rows.Next() { + var extension, schema string + + if err = rows.Scan(&extension, &schema); err != nil { + return nil, fmt.Errorf("error when processing row: %v", err) + } + dbExtensions[extension] = schema + } + + return dbExtensions, err +} + +// executeCreateExtension creates a new extension in the given schema. +// The caller is responsible for opening and closing the database connection. +func (c *Cluster) executeCreateExtension(extName, schemaName string) error { + return c.execCreateOrAlterExtension(extName, schemaName, createExtensionSQL, + "creating extension", "create extension") +} + +// executeAlterExtension changes the schema of the given extension. +// The caller is responsible for opening and closing the database connection. +func (c *Cluster) executeAlterExtension(extName, schemaName string) error { + return c.execCreateOrAlterExtension(extName, schemaName, alterExtensionSQL, + "changing schema for extension", "alter extension schema") +} + +func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doing, operation string) error { + + c.logger.Infof("%s %q schema %q", doing, extName, schemaName) + if _, err := c.pgDb.Exec(fmt.Sprintf(statement, extName, schemaName)); err != nil { + return fmt.Errorf("could not execute %s: %v", operation, err) + } + + return nil +} + +// Creates a connection pool credentials lookup function in every database to +// perform remote authentication. +func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error { + var stmtBytes bytes.Buffer + + c.logger.Info("Installing lookup function") + + // Open a new connection if not yet done. This connection will be used only + // to get the list of databases, not for the actual installation. + if err := c.initDbConn(); err != nil { + return fmt.Errorf("could not init database connection") + } + defer func() { + if c.connectionIsClosed() { + return + } + + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } + }() + + // List of databases we failed to process. At the moment it functions just + // like a flag to retry on the next sync, but in the future we may want to + // retry only the necessary parts, so let's keep the list. + failedDatabases := []string{} + currentDatabases, err := c.getDatabases() + if err != nil { + msg := "could not get databases to install pooler lookup function: %v" + return fmt.Errorf(msg, err) + } + + // We've got the list of target databases, now close this connection to + // open a new one to each of them.
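+	// For illustration (hypothetical values): with poolerSchema "pooler" and poolerUser "pooler"
+	// the connectionPoolerLookup template renders to roughly
+	//   CREATE SCHEMA IF NOT EXISTS pooler;
+	//   CREATE OR REPLACE FUNCTION pooler.user_lookup(...) ... SECURITY DEFINER;
+	//   REVOKE ALL ON FUNCTION pooler.user_lookup(text) FROM public, pooler;
+	//   GRANT EXECUTE ON FUNCTION pooler.user_lookup(text) TO pooler;
+	//   GRANT USAGE ON SCHEMA pooler TO pooler;
+	// and is then executed once per database below, skipping template0 and template1.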
+ if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } + + templater := template.Must(template.New("sql").Parse(connectionPoolerLookup)) + params := TemplateParams{ + "pooler_schema": poolerSchema, + "pooler_user": poolerUser, + } + + if err := templater.Execute(&stmtBytes, params); err != nil { + msg := "could not prepare sql statement %+v: %v" + return fmt.Errorf(msg, params, err) + } + + for dbname := range currentDatabases { + + if dbname == "template0" || dbname == "template1" { + continue + } + + c.logger.Infof("install pooler lookup function into database '%s'", dbname) + + // golang sql will do retries couple of times if pq driver reports + // connections issues (driver.ErrBadConn), but since our query is + // idempotent, we can retry in a view of other errors (e.g. due to + // failover a db is temporary in a read-only mode or so) to make sure + // it was applied. + execErr := retryutil.Retry( + constants.PostgresConnectTimeout, + constants.PostgresConnectRetryTimeout, + func() (bool, error) { + + // At this moment we are not connected to any database + if err := c.initDbConnWithName(dbname); err != nil { + msg := "could not init database connection to %s" + return false, fmt.Errorf(msg, dbname) + } + defer func() { + if err := c.closeDbConn(); err != nil { + msg := "could not close database connection: %v" + c.logger.Errorf(msg, err) + } + }() + + if _, err = c.pgDb.Exec(stmtBytes.String()); err != nil { + msg := fmt.Errorf("could not execute sql statement %s: %v", + stmtBytes.String(), err) + return false, msg + } + + return true, nil + }) + + if execErr != nil { + c.logger.Errorf("could not execute after retries %s: %v", + stmtBytes.String(), err) + // process other databases + failedDatabases = append(failedDatabases, dbname) + continue + } + c.logger.Infof("pooler lookup function installed into %s", dbname) + } + + if len(failedDatabases) == 0 { + c.ConnectionPooler[role].LookupFunction = true + } + + return nil +} diff --git a/pkg/cluster/exec.go b/pkg/cluster/exec.go index 8dd6bd91d..8b5089b4e 100644 --- a/pkg/cluster/exec.go +++ b/pkg/cluster/exec.go @@ -2,10 +2,11 @@ package cluster import ( "bytes" + "context" "fmt" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" @@ -23,7 +24,7 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) ( execErr bytes.Buffer ) - pod, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{}) + pod, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("could not get pod info: %v", err) } diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index e2251a67c..83098b8a9 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1,9 +1,13 @@ package cluster import ( + "context" "encoding/json" "fmt" + "path" "sort" + "strconv" + "strings" "github.com/sirupsen/logrus" @@ -20,17 +24,20 @@ import ( "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" "k8s.io/apimachinery/pkg/labels" ) const ( - pgBinariesLocationTemplate = "/usr/lib/postgresql/%s/bin" + pgBinariesLocationTemplate = 
"/usr/lib/postgresql/%v/bin" patroniPGBinariesParameterName = "bin_dir" patroniPGParametersParameterName = "parameters" patroniPGHBAConfParameterName = "pg_hba" localHost = "127.0.0.1/32" + connectionPoolerContainer = "connection-pooler" + pgPort = 5432 ) type pgUser struct { @@ -43,6 +50,8 @@ type patroniDCS struct { LoopWait uint32 `json:"loop_wait,omitempty"` RetryTimeout uint32 `json:"retry_timeout,omitempty"` MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` + SynchronousMode bool `json:"synchronous_mode,omitempty"` + SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"` PGBootstrapConfiguration map[string]interface{} `json:"postgresql,omitempty"` Slots map[string]map[string]string `json:"slots,omitempty"` } @@ -84,6 +93,28 @@ func (c *Cluster) serviceName(role PostgresRole) string { return name } +func (c *Cluster) serviceAddress(role PostgresRole) string { + service, exist := c.Services[role] + + if exist { + return service.ObjectMeta.Name + } + + c.logger.Warningf("No service for role %s", role) + return "" +} + +func (c *Cluster) servicePort(role PostgresRole) string { + service, exist := c.Services[role] + + if exist { + return fmt.Sprint(service.Spec.Ports[0].Port) + } + + c.logger.Warningf("No service for role %s", role) + return "" +} + func (c *Cluster) podDisruptionBudgetName() string { return c.OpConfig.PDBNameFormat.Format("cluster", c.Name) } @@ -92,10 +123,19 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources { config := c.OpConfig - defaultRequests := acidv1.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} - defaultLimits := acidv1.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} + defaultRequests := acidv1.ResourceDescription{ + CPU: config.Resources.DefaultCPURequest, + Memory: config.Resources.DefaultMemoryRequest, + } + defaultLimits := acidv1.ResourceDescription{ + CPU: config.Resources.DefaultCPULimit, + Memory: config.Resources.DefaultMemoryLimit, + } - return acidv1.Resources{ResourceRequests: defaultRequests, ResourceLimits: defaultLimits} + return acidv1.Resources{ + ResourceRequests: defaultRequests, + ResourceLimits: defaultLimits, + } } func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) { @@ -149,7 +189,7 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD return requests, nil } -func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, logger *logrus.Entry) (string, error) { +func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) { config := spiloConfiguration{} config.Bootstrap = pgBootstrap{} @@ -222,9 +262,22 @@ PatroniInitDBParams: if patroni.Slots != nil { config.Bootstrap.DCS.Slots = patroni.Slots } + if patroni.SynchronousMode { + config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode + } + if patroni.SynchronousModeStrict != false { + config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict + } config.PgLocalConfiguration = make(map[string]interface{}) - config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion) + + // the newer and preferred way to specify the PG version is to use the `PGVERSION` env variable + // setting postgresq.bin_dir in the SPILO_CONFIGURATION 
still works and takes precedence over PGVERSION + // so we add postgresq.bin_dir only if PGVERSION is unused + // see PR 222 in Spilo + if !EnablePgVersionEnvVar { + config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion) + } if len(pg.Parameters) > 0 { local, bootstrap := getLocalAndBoostrapPostgreSQLParameters(pg.Parameters) @@ -267,25 +320,39 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca return } -func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity { - matchExpressions := make([]v1.NodeSelectorRequirement, 0) - if len(nodeReadinessLabel) == 0 { +func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity { + if len(nodeReadinessLabel) == 0 && nodeAffinity == nil { return nil } - for k, v := range nodeReadinessLabel { - matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ - Key: k, - Operator: v1.NodeSelectorOpIn, - Values: []string{v}, - }) + nodeAffinityCopy := *&v1.NodeAffinity{} + if nodeAffinity != nil { + nodeAffinityCopy = *nodeAffinity.DeepCopy() + } + if len(nodeReadinessLabel) > 0 { + matchExpressions := make([]v1.NodeSelectorRequirement, 0) + for k, v := range nodeReadinessLabel { + matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ + Key: k, + Operator: v1.NodeSelectorOpIn, + Values: []string{v}, + }) + } + nodeReadinessSelectorTerm := v1.NodeSelectorTerm{MatchExpressions: matchExpressions} + if nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution == nil { + nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + nodeReadinessSelectorTerm, + }, + } + } else { + nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ + NodeSelectorTerms: append(nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, nodeReadinessSelectorTerm), + } + } } return &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: matchExpressions}}, - }, - }, + NodeAffinity: &nodeAffinityCopy, } } @@ -315,7 +382,11 @@ func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]stri return *tolerationsSpec } - if len(podToleration["key"]) > 0 || len(podToleration["operator"]) > 0 || len(podToleration["value"]) > 0 || len(podToleration["effect"]) > 0 { + if len(podToleration["key"]) > 0 || + len(podToleration["operator"]) > 0 || + len(podToleration["value"]) > 0 || + len(podToleration["effect"]) > 0 { + return []v1.Toleration{ { Key: podToleration["key"], @@ -382,15 +453,15 @@ func generateContainer( VolumeMounts: volumeMounts, Env: envVars, SecurityContext: &v1.SecurityContext{ - Privileged: &privilegedMode, - ReadOnlyRootFilesystem: util.False(), + AllowPrivilegeEscalation: &privilegedMode, + Privileged: &privilegedMode, + ReadOnlyRootFilesystem: util.False(), }, } } func generateSidecarContainers(sidecars []acidv1.Sidecar, - volumeMounts []v1.VolumeMount, defaultResources acidv1.Resources, - superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) { + defaultResources acidv1.Resources, startIndex int, logger *logrus.Entry) ([]v1.Container, error) { if len(sidecars) > 0 { result := make([]v1.Container, 0) @@ -409,7 +480,7 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar, return nil, err } - sc 
:= getSidecarContainer(sidecar, index, volumeMounts, resources, superUserName, credentialsSecretName, logger) + sc := getSidecarContainer(sidecar, startIndex+index, resources) result = append(result, *sc) } return result, nil @@ -417,17 +488,66 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar, return nil, nil } +// adds common fields to sidecars +func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container { + result := []v1.Container{} + + for _, container := range in { + container.VolumeMounts = append(container.VolumeMounts, volumeMounts...) + env := []v1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POSTGRES_USER", + Value: superUserName, + }, + { + Name: "POSTGRES_PASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: credentialsSecretName, + }, + Key: "password", + }, + }, + }, + } + mergedEnv := append(env, container.Env...) + container.Env = deduplicateEnvVars(mergedEnv, container.Name, logger) + result = append(result, container) + } + + return result +} + // Check whether or not we're requested to mount an shm volume, // taking into account that PostgreSQL manifest has precedence. -func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) *bool { - if pgSpec.ShmVolume != nil && *pgSpec.ShmVolume { - return pgSpec.ShmVolume +func mountShmVolumeNeeded(opConfig config.Config, spec *acidv1.PostgresSpec) *bool { + if spec.ShmVolume != nil && *spec.ShmVolume { + return spec.ShmVolume } return opConfig.ShmVolume } -func generatePodTemplate( +func (c *Cluster) generatePodTemplate( namespace string, labels labels.Set, annotations map[string]string, @@ -435,8 +555,11 @@ func generatePodTemplate( initContainers []v1.Container, sidecarContainers []v1.Container, tolerationsSpec *[]v1.Toleration, + spiloRunAsUser *int64, + spiloRunAsGroup *int64, spiloFSGroup *int64, nodeAffinity *v1.Affinity, + schedulerName *string, terminateGracePeriod int64, podServiceAccountName string, kubeIAMRole string, @@ -446,6 +569,7 @@ func generatePodTemplate( podAntiAffinityTopologyKey string, additionalSecretMount string, additionalSecretMountPath string, + additionalVolumes []acidv1.AdditionalVolume, ) (*v1.PodTemplateSpec, error) { terminateGracePeriodSeconds := terminateGracePeriod @@ -453,6 +577,14 @@ func generatePodTemplate( containers = append(containers, sidecarContainers...) 
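+	// The pod security context only carries values the caller actually passes:
+	// RunAsUser, RunAsGroup and FSGroup stay unset (so the image defaults apply)
+	// when the corresponding pointer arguments are nil.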
securityContext := v1.PodSecurityContext{} + if spiloRunAsUser != nil { + securityContext.RunAsUser = spiloRunAsUser + } + + if spiloRunAsGroup != nil { + securityContext.RunAsGroup = spiloRunAsGroup + } + if spiloFSGroup != nil { securityContext.FSGroup = spiloFSGroup } @@ -466,6 +598,10 @@ func generatePodTemplate( SecurityContext: &securityContext, } + if schedulerName != nil { + podSpec.SchedulerName = *schedulerName + } + if shmVolume != nil && *shmVolume { addShmVolume(&podSpec) } @@ -484,6 +620,10 @@ func generatePodTemplate( addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath) } + if additionalVolumes != nil { + c.addAdditionalVolumes(&podSpec, additionalVolumes) + } + template := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, @@ -543,10 +683,6 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri Name: "KUBERNETES_ROLE_LABEL", Value: c.OpConfig.PodRoleLabel, }, - { - Name: "KUBERNETES_LABELS", - Value: labels.Set(c.OpConfig.ClusterLabels).String(), - }, { Name: "PGPASSWORD_SUPERUSER", ValueFrom: &v1.EnvVarSource{ @@ -582,20 +718,18 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri Value: c.OpConfig.PamRoleName, }, } + if c.OpConfig.EnablePgVersionEnvVar { + envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.Spec.PgVersion}) + } + // Spilo expects cluster labels as JSON + if clusterLabels, err := json.Marshal(labels.Set(c.OpConfig.ClusterLabels)); err != nil { + envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_LABELS", Value: labels.Set(c.OpConfig.ClusterLabels).String()}) + } else { + envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_LABELS", Value: string(clusterLabels)}) + } if spiloConfiguration != "" { envVars = append(envVars, v1.EnvVar{Name: "SPILO_CONFIGURATION", Value: spiloConfiguration}) } - if c.OpConfig.WALES3Bucket != "" { - envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) - envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) - } - - if c.OpConfig.LogS3Bucket != "" { - envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) - envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) - envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) - } if c.patroniUsesKubernetes() { envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"}) @@ -603,7 +737,11 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost}) } - if cloneDescription.ClusterName != "" { + if c.patroniKubernetesUseConfigMaps() { + envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"}) + } + + if cloneDescription != nil && cloneDescription.ClusterName != "" { envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...) } @@ -611,10 +749,34 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...) 
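// A small sketch of the KUBERNETES_LABELS change above, assuming the common
// default cluster label application=spilo (an example value only):
// labels.Set is a map[string]string, so json.Marshal produces the JSON object
// Spilo expects instead of the "key=value" string returned by .String().
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	clusterLabels := labels.Set{"application": "spilo"}

	// old format: application=spilo
	fmt.Println(clusterLabels.String())

	// new format: {"application":"spilo"} - what Spilo parses as JSON
	if b, err := json.Marshal(clusterLabels); err == nil {
		fmt.Println(string(b))
	}
}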
} + // add vars taken from pod_environment_configmap and pod_environment_secret first + // (to allow them to override the globals set in the operator config) if len(customPodEnvVarsList) > 0 { envVars = append(envVars, customPodEnvVarsList...) } + if c.OpConfig.WALES3Bucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + } + + if c.OpConfig.WALGSBucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) + } + + if c.OpConfig.GCPCredentials != "" { + envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials}) + } + + if c.OpConfig.LogS3Bucket != "" { + envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket}) + envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))}) + envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""}) + } + return envVars } @@ -633,65 +795,93 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus. result = append(result, input[i]) } else if names[va.Name] == 1 { names[va.Name]++ - logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", - va.Name, containerName) + + // Some variables (those to configure the WAL_ and LOG_ shipping) may be overwritten, only log as info + if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") { + logger.Infof("global variable %q has been overwritten by configmap/secret for container %q", + va.Name, containerName) + } else { + logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", + va.Name, containerName) + } } } return result } -func getSidecarContainer(sidecar acidv1.Sidecar, index int, volumeMounts []v1.VolumeMount, - resources *v1.ResourceRequirements, superUserName string, credentialsSecretName string, logger *logrus.Entry) *v1.Container { +// Return list of variables the pod recieved from the configured ConfigMap +func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) { + configMapPodEnvVarsList := make([]v1.EnvVar, 0) + + if c.OpConfig.PodEnvironmentConfigMap.Name == "" { + return configMapPodEnvVarsList, nil + } + + cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentConfigMap.Name, + metav1.GetOptions{}) + if err != nil { + // if not found, try again using the cluster's namespace if it's different (old behavior) + if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace { + cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentConfigMap.Name, + metav1.GetOptions{}) + } + if err != nil { + return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) + } + } + for k, v := range cm.Data { + configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) + } + return configMapPodEnvVarsList, nil +} + +// Return list of 
variables the pod received from the configured Secret +func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) { + secretPodEnvVarsList := make([]v1.EnvVar, 0) + + if c.OpConfig.PodEnvironmentSecret == "" { + return secretPodEnvVarsList, nil + } + + secret, err := c.KubeClient.Secrets(c.Namespace).Get( + context.TODO(), + c.OpConfig.PodEnvironmentSecret, + metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not read Secret PodEnvironmentSecretName: %v", err) + } + + for k := range secret.Data { + secretPodEnvVarsList = append(secretPodEnvVarsList, + v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.OpConfig.PodEnvironmentSecret, + }, + Key: k, + }, + }}) + } + + return secretPodEnvVarsList, nil +} + +func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container { name := sidecar.Name if name == "" { name = fmt.Sprintf("sidecar-%d", index) } - env := []v1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "POD_NAMESPACE", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "POSTGRES_USER", - Value: superUserName, - }, - { - Name: "POSTGRES_PASSWORD", - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: credentialsSecretName, - }, - Key: "password", - }, - }, - }, - } - if len(sidecar.Env) > 0 { - env = append(env, sidecar.Env...) - } return &v1.Container{ Name: name, Image: sidecar.DockerImage, ImagePullPolicy: v1.PullIfNotPresent, Resources: *resources, - VolumeMounts: volumeMounts, - Env: deduplicateEnvVars(env, name, logger), + Env: sidecar.Env, Ports: sidecar.Ports, } } @@ -716,6 +906,15 @@ func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acid } } +func extractPgVersionFromBinPath(binPath string, template string) (string, error) { + var pgVersion float32 + _, err := fmt.Sscanf(binPath, template, &pgVersion) + if err != nil { + return "", err + } + return fmt.Sprintf("%v", pgVersion), nil +} + func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) { var ( @@ -724,6 +923,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef sidecarContainers []v1.Container podTemplate *v1.PodTemplateSpec volumeClaimTemplate *v1.PersistentVolumeClaim + additionalVolumes = spec.AdditionalVolumes ) // Improve me. Please. 
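// A short usage sketch for the extractPgVersionFromBinPath helper added above:
// fmt.Sscanf parses the numeric part of the bin path against the template, so
// both major-only and major.minor versions round-trip as strings. The paths
// and the helper name used here are illustrative, not the operator's API.
package main

import "fmt"

// extractVersion uses the same Sscanf-based parsing idea as
// extractPgVersionFromBinPath in this hunk.
func extractVersion(binPath, template string) (string, error) {
	var v float32
	if _, err := fmt.Sscanf(binPath, template, &v); err != nil {
		return "", err
	}
	return fmt.Sprintf("%v", v), nil
}

func main() {
	for _, p := range []string{"/usr/lib/postgresql/12/bin", "/usr/lib/postgresql/9.6/bin"} {
		v, err := extractVersion(p, "/usr/lib/postgresql/%v/bin")
		fmt.Println(v, err) // "12" <nil>, then "9.6" <nil>
	}
}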
@@ -733,12 +933,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef request := spec.Resources.ResourceRequests.Memory if request == "" { - request = c.OpConfig.DefaultMemoryRequest + request = c.OpConfig.Resources.DefaultMemoryRequest } limit := spec.Resources.ResourceLimits.Memory if limit == "" { - limit = c.OpConfig.DefaultMemoryLimit + limit = c.OpConfig.Resources.DefaultMemoryLimit } isSmaller, err := util.IsSmallerQuantity(request, limit) @@ -760,12 +960,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef // TODO #413 sidecarRequest := sidecar.Resources.ResourceRequests.Memory if request == "" { - request = c.OpConfig.DefaultMemoryRequest + request = c.OpConfig.Resources.DefaultMemoryRequest } sidecarLimit := sidecar.Resources.ResourceLimits.Memory if limit == "" { - limit = c.OpConfig.DefaultMemoryLimit + limit = c.OpConfig.Resources.DefaultMemoryLimit } isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit) @@ -794,20 +994,34 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef initContainers = spec.InitContainers } - customPodEnvVarsList := make([]v1.EnvVar, 0) - - if c.OpConfig.PodEnvironmentConfigMap != "" { - var cm *v1.ConfigMap - cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) - } - for k, v := range cm.Data { - customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) - } - sort.Slice(customPodEnvVarsList, - func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) + spiloCompathWalPathList := make([]v1.EnvVar, 0) + if c.OpConfig.EnableSpiloWalPathCompat { + spiloCompathWalPathList = append(spiloCompathWalPathList, + v1.EnvVar{ + Name: "ENABLE_WAL_PATH_COMPAT", + Value: "true", + }, + ) } + + // fetch env vars from custom ConfigMap + configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables() + if err != nil { + return nil, err + } + + // fetch env vars from custom ConfigMap + secretEnvVarsList, err := c.getPodEnvironmentSecretVariables() + if err != nil { + return nil, err + } + + // concat all custom pod env vars and sort them + customPodEnvVarsList := append(spiloCompathWalPathList, configMapEnvVarsList...) + customPodEnvVarsList = append(customPodEnvVarsList, secretEnvVarsList...) 
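// A condensed sketch, assuming a hypothetical Secret named "pod-env" with the
// keys minio_access_key/minio_secret_key, of how getPodEnvironmentSecretVariables
// above exposes secret keys: each key becomes an EnvVar with a SecretKeyRef
// rather than an inline value, so the secret content never appears in the
// generated StatefulSet spec itself.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// secretKeysToEnvVars mirrors the shape of the env vars built in
// getPodEnvironmentSecretVariables: one SecretKeyRef per key.
func secretKeysToEnvVars(secretName string, keys []string) []v1.EnvVar {
	vars := make([]v1.EnvVar, 0, len(keys))
	for _, k := range keys {
		vars = append(vars, v1.EnvVar{
			Name: k,
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: secretName},
					Key:                  k,
				},
			},
		})
	}
	return vars
}

func main() {
	for _, e := range secretKeysToEnvVars("pod-env", []string{"minio_access_key", "minio_secret_key"}) {
		fmt.Printf("%s -> secret %s / key %s\n", e.Name, e.ValueFrom.SecretKeyRef.Name, e.ValueFrom.SecretKeyRef.Key)
	}
}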
+ sort.Slice(customPodEnvVarsList, + func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) + if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" { return nil, fmt.Errorf("s3_wal_path is empty for standby cluster") } @@ -834,84 +1048,184 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef } } - spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger) + spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.OpConfig.EnablePgVersionEnvVar, c.logger) if err != nil { return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err) } // generate environment variables for the spilo container - spiloEnvVars := deduplicateEnvVars( - c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone, - spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger) + spiloEnvVars := c.generateSpiloPodEnvVars( + c.Postgresql.GetUID(), + spiloConfiguration, + spec.Clone, + spec.StandbyCluster, + customPodEnvVarsList, + ) // pickup the docker image for the spilo container effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage) - volumeMounts := generateVolumeMounts(spec.Volume) - - // generate the spilo container - c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars) - spiloContainer := generateContainer(c.containerName(), - &effectiveDockerImage, - resourceRequirements, - spiloEnvVars, - volumeMounts, - c.OpConfig.Resources.SpiloPrivileged, - ) - - // resolve conflicts between operator-global and per-cluster sidecars - sideCars := c.mergeSidecars(spec.Sidecars) - - resourceRequirementsScalyrSidecar := makeResources( - c.OpConfig.ScalyrCPURequest, - c.OpConfig.ScalyrMemoryRequest, - c.OpConfig.ScalyrCPULimit, - c.OpConfig.ScalyrMemoryLimit, - ) - - // generate scalyr sidecar container - if scalyrSidecar := - generateScalyrSidecarSpec(c.Name, - c.OpConfig.ScalyrAPIKey, - c.OpConfig.ScalyrServerURL, - c.OpConfig.ScalyrImage, - &resourceRequirementsScalyrSidecar, c.logger); scalyrSidecar != nil { - sideCars = append(sideCars, *scalyrSidecar) + // determine the User, Group and FSGroup for the spilo pod + effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser + if spec.SpiloRunAsUser != nil { + effectiveRunAsUser = spec.SpiloRunAsUser } - // generate sidecar containers - if sideCars != nil && len(sideCars) > 0 { - if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) { - c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail") - } - if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources, - c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil { - return nil, fmt.Errorf("could not generate sidecar containers: %v", err) - } + effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup + if spec.SpiloRunAsGroup != nil { + effectiveRunAsGroup = spec.SpiloRunAsGroup } - tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) - effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) - - // determine the FSGroup for the spilo pod effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup if spec.SpiloFSGroup != nil { effectiveFSGroup = spec.SpiloFSGroup } - annotations := 
c.generatePodAnnotations(spec) + volumeMounts := generateVolumeMounts(spec.Volume) + + // configure TLS with a custom secret volume + if spec.TLS != nil && spec.TLS.SecretName != "" { + // this is combined with the FSGroup in the section above + // to give read access to the postgres user + defaultMode := int32(0640) + mountPath := "/tls" + additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{ + Name: spec.TLS.SecretName, + MountPath: mountPath, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: spec.TLS.SecretName, + DefaultMode: &defaultMode, + }, + }, + }) + + // use the same filenames as Secret resources by default + certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt") + privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key") + spiloEnvVars = append( + spiloEnvVars, + v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile}, + v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile}, + ) + + if spec.TLS.CAFile != "" { + // support scenario when the ca.crt resides in a different secret, diff path + mountPathCA := mountPath + if spec.TLS.CASecretName != "" { + mountPathCA = mountPath + "ca" + } + + caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "") + spiloEnvVars = append( + spiloEnvVars, + v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile}, + ) + + // the ca file from CASecretName secret takes priority + if spec.TLS.CASecretName != "" { + additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{ + Name: spec.TLS.CASecretName, + MountPath: mountPathCA, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: spec.TLS.CASecretName, + DefaultMode: &defaultMode, + }, + }, + }) + } + } + } + + // generate the spilo container + c.logger.Debugf("Generating Spilo container, environment variables") + c.logger.Debugf("%v", spiloEnvVars) + + spiloContainer := generateContainer(c.containerName(), + &effectiveDockerImage, + resourceRequirements, + deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger), + volumeMounts, + c.OpConfig.Resources.SpiloPrivileged, + ) + + // generate container specs for sidecars specified in the cluster manifest + clusterSpecificSidecars := []v1.Container{} + if spec.Sidecars != nil && len(spec.Sidecars) > 0 { + // warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars) + if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) { + c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail") + } + + if clusterSpecificSidecars, err = generateSidecarContainers(spec.Sidecars, defaultResources, 0, c.logger); err != nil { + return nil, fmt.Errorf("could not generate sidecar containers: %v", err) + } + } + + // decrapted way of providing global sidecars + var globalSidecarContainersByDockerImage []v1.Container + var globalSidecarsByDockerImage []acidv1.Sidecar + for name, dockerImage := range c.OpConfig.SidecarImages { + globalSidecarsByDockerImage = append(globalSidecarsByDockerImage, acidv1.Sidecar{Name: name, DockerImage: dockerImage}) + } + if globalSidecarContainersByDockerImage, err = generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars), c.logger); err != nil { + return nil, fmt.Errorf("could not generate sidecar containers: %v", err) + } + // make the resulting list reproducible + // c.OpConfig.SidecarImages is unsorted by Golang definition + // .Name is unique + 
sort.Slice(globalSidecarContainersByDockerImage, func(i, j int) bool { + return globalSidecarContainersByDockerImage[i].Name < globalSidecarContainersByDockerImage[j].Name + }) + + // generate scalyr sidecar container + var scalyrSidecars []v1.Container + if scalyrSidecar, err := + generateScalyrSidecarSpec(c.Name, + c.OpConfig.ScalyrAPIKey, + c.OpConfig.ScalyrServerURL, + c.OpConfig.ScalyrImage, + c.OpConfig.ScalyrCPURequest, + c.OpConfig.ScalyrMemoryRequest, + c.OpConfig.ScalyrCPULimit, + c.OpConfig.ScalyrMemoryLimit, + defaultResources, + c.logger); err != nil { + return nil, fmt.Errorf("could not generate Scalyr sidecar: %v", err) + } else { + if scalyrSidecar != nil { + scalyrSidecars = append(scalyrSidecars, *scalyrSidecar) + } + } + + sidecarContainers, conflicts := mergeContainers(clusterSpecificSidecars, c.Config.OpConfig.SidecarContainers, globalSidecarContainersByDockerImage, scalyrSidecars) + for containerName := range conflicts { + c.logger.Warningf("a sidecar is specified twice. Ignoring sidecar %q in favor of %q with high a precendence", + containerName, containerName) + } + + sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger) + + tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) + effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) + + podAnnotations := c.generatePodAnnotations(spec) // generate pod template for the statefulset, based on the spilo container and sidecars - if podTemplate, err = generatePodTemplate( + podTemplate, err = c.generatePodTemplate( c.Namespace, c.labelsSet(true), - annotations, + c.annotationsSet(podAnnotations), spiloContainer, initContainers, sidecarContainers, &tolerationSpec, + effectiveRunAsUser, + effectiveRunAsGroup, effectiveFSGroup, - nodeAffinity(c.OpConfig.NodeReadinessLabel), + nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity), + spec.SchedulerName, int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, c.OpConfig.KubeIAMRole, @@ -920,9 +1234,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef c.OpConfig.EnablePodAntiAffinity, c.OpConfig.PodAntiAffinityTopologyKey, c.OpConfig.AdditionalSecretMount, - c.OpConfig.AdditionalSecretMountPath); err != nil { - return nil, fmt.Errorf("could not generate pod template: %v", err) - } + c.OpConfig.AdditionalSecretMountPath, + additionalVolumes) if err != nil { return nil, fmt.Errorf("could not generate pod template: %v", err) @@ -949,12 +1262,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy) } + stsAnnotations := make(map[string]string) + stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false) + stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil)) + statefulSet := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: c.statefulSetName(), Namespace: c.Namespace, Labels: c.labelsSet(true), - Annotations: map[string]string{rollingUpdateStatefulsetAnnotationKey: "false"}, + Annotations: stsAnnotations, }, Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, @@ -989,57 +1306,44 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s } func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, 
dockerImage string, - containerResources *acidv1.Resources, logger *logrus.Entry) *acidv1.Sidecar { + scalyrCPURequest string, scalyrMemoryRequest string, scalyrCPULimit string, scalyrMemoryLimit string, + defaultResources acidv1.Resources, logger *logrus.Entry) (*v1.Container, error) { if APIKey == "" || dockerImage == "" { if APIKey == "" && dockerImage != "" { logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined") } - return nil + return nil, nil } - scalarSpec := &acidv1.Sidecar{ - Name: "scalyr-sidecar", - DockerImage: dockerImage, - Env: []v1.EnvVar{ - { - Name: "SCALYR_API_KEY", - Value: APIKey, - }, - { - Name: "SCALYR_SERVER_HOST", - Value: clusterName, - }, + resourcesScalyrSidecar := makeResources( + scalyrCPURequest, + scalyrMemoryRequest, + scalyrCPULimit, + scalyrMemoryLimit, + ) + resourceRequirementsScalyrSidecar, err := generateResourceRequirements(resourcesScalyrSidecar, defaultResources) + if err != nil { + return nil, fmt.Errorf("invalid resources for Scalyr sidecar: %v", err) + } + env := []v1.EnvVar{ + { + Name: "SCALYR_API_KEY", + Value: APIKey, + }, + { + Name: "SCALYR_SERVER_HOST", + Value: clusterName, }, - Resources: *containerResources, } if serverURL != "" { - scalarSpec.Env = append(scalarSpec.Env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL}) + env = append(env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL}) } - return scalarSpec -} - -// mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest -func (c *Cluster) mergeSidecars(sidecars []acidv1.Sidecar) []acidv1.Sidecar { - globalSidecarsToSkip := map[string]bool{} - result := make([]acidv1.Sidecar, 0) - - for i, sidecar := range sidecars { - dockerImage, ok := c.OpConfig.Sidecars[sidecar.Name] - if ok { - if dockerImage != sidecar.DockerImage { - c.logger.Warningf("merging definitions for sidecar %q: "+ - "ignoring %q in the global scope in favor of %q defined in the cluster", - sidecar.Name, dockerImage, sidecar.DockerImage) - } - globalSidecarsToSkip[sidecar.Name] = true - } - result = append(result, sidecars[i]) - } - for name, dockerImage := range c.OpConfig.Sidecars { - if !globalSidecarsToSkip[name] { - result = append(result, acidv1.Sidecar{Name: name, DockerImage: dockerImage}) - } - } - return result + return &v1.Container{ + Name: "scalyr-sidecar", + Image: dockerImage, + Env: env, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resourceRequirementsScalyrSidecar, + }, nil } func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 { @@ -1117,6 +1421,69 @@ func addSecretVolume(podSpec *v1.PodSpec, additionalSecretMount string, addition podSpec.Volumes = volumes } +func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec, + additionalVolumes []acidv1.AdditionalVolume) { + + volumes := podSpec.Volumes + mountPaths := map[string]acidv1.AdditionalVolume{} + for i, v := range additionalVolumes { + if previousVolume, exist := mountPaths[v.MountPath]; exist { + msg := "Volume %+v cannot be mounted to the same path as %+v" + c.logger.Warningf(msg, v, previousVolume) + continue + } + + if v.MountPath == constants.PostgresDataMount { + msg := "Cannot mount volume on postgresql data directory, %+v" + c.logger.Warningf(msg, v) + continue + } + + if v.TargetContainers == nil { + spiloContainer := podSpec.Containers[0] + additionalVolumes[i].TargetContainers = []string{spiloContainer.Name} + } + + for _, target := range v.TargetContainers { + if target == "all" && len(v.TargetContainers) != 1 { + msg := `Target 
containers could be either "all" or a list + of containers, mixing those is not allowed, %+v` + c.logger.Warningf(msg, v) + continue + } + } + + volumes = append(volumes, + v1.Volume{ + Name: v.Name, + VolumeSource: v.VolumeSource, + }, + ) + + mountPaths[v.MountPath] = v + } + + c.logger.Infof("Mount additional volumes: %+v", additionalVolumes) + + for i := range podSpec.Containers { + mounts := podSpec.Containers[i].VolumeMounts + for _, v := range additionalVolumes { + for _, target := range v.TargetContainers { + if podSpec.Containers[i].Name == target || target == "all" { + mounts = append(mounts, v1.VolumeMount{ + Name: v.Name, + MountPath: v.MountPath, + SubPath: v.SubPath, + }) + } + } + } + podSpec.Containers[i].VolumeMounts = mounts + } + + podSpec.Volumes = volumes +} + func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) { var storageClassName *string @@ -1187,12 +1554,26 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) return nil } + //skip NOLOGIN users + for _, flag := range pgUser.Flags { + if flag == constants.RoleFlagNoLogin { + return nil + } + } + username := pgUser.Name + lbls := c.labelsSet(true) + + if username == constants.ConnectionPoolerUserName { + lbls = c.connectionPoolerLabels("", false).MatchLabels + } + secret := v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: c.credentialSecretName(username), - Namespace: namespace, - Labels: c.labelsSet(true), + Name: c.credentialSecretName(username), + Namespace: namespace, + Labels: lbls, + Annotations: c.annotationsSet(nil), }, Type: v1.SecretTypeOpaque, Data: map[string][]byte{ @@ -1200,6 +1581,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) "password": []byte(pgUser.Password), }, } + return &secret } @@ -1237,7 +1619,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) Type: v1.ServiceTypeClusterIP, } - if role == Replica { + if role == Replica || c.patroniKubernetesUseConfigMaps() { serviceSpec.Selector = c.roleLabelsSet(false, role) } @@ -1253,6 +1635,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) } c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges) + serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy) serviceSpec.Type = v1.ServiceTypeLoadBalancer } else if role == Replica { // before PR #258, the replica service was only created if allocated a LB @@ -1265,7 +1648,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) Name: c.serviceName(role), Namespace: c.Namespace, Labels: c.roleLabelsSet(true, role), - Annotations: c.generateServiceAnnotations(role, spec), + Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)), }, Spec: serviceSpec, } @@ -1363,12 +1746,32 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) msg := "Figure out which S3 bucket to use from env" c.logger.Info(msg, description.S3WalPath) + if c.OpConfig.WALES3Bucket != "" { + envs := []v1.EnvVar{ + { + Name: "CLONE_WAL_S3_BUCKET", + Value: c.OpConfig.WALES3Bucket, + }, + } + result = append(result, envs...) 
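// A condensed sketch of the bucket selection added to generateCloneEnvironment
// above: an S3 WAL bucket takes precedence, a GCS bucket is the fallback and is
// paired with the credentials path, and an empty result signals that neither
// bucket is configured. Bucket names and the credentials path are illustrative.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// cloneBucketEnv sketches the branch order used in this hunk.
func cloneBucketEnv(s3Bucket, gsBucket, gcpCredentials string) []v1.EnvVar {
	switch {
	case s3Bucket != "":
		return []v1.EnvVar{{Name: "CLONE_WAL_S3_BUCKET", Value: s3Bucket}}
	case gsBucket != "":
		return []v1.EnvVar{
			{Name: "CLONE_WAL_GS_BUCKET", Value: gsBucket},
			{Name: "CLONE_GOOGLE_APPLICATION_CREDENTIALS", Value: gcpCredentials},
		}
	default:
		return nil // neither bucket configured - the operator logs an error here
	}
}

func main() {
	fmt.Println(cloneBucketEnv("", "my-wal-bucket", "/var/secrets/google/key.json"))
}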
+ } else if c.OpConfig.WALGSBucket != "" { + envs := []v1.EnvVar{ + { + Name: "CLONE_WAL_GS_BUCKET", + Value: c.OpConfig.WALGSBucket, + }, + { + Name: "CLONE_GOOGLE_APPLICATION_CREDENTIALS", + Value: c.OpConfig.GCPCredentials, + }, + } + result = append(result, envs...) + } else { + c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.") + } + envs := []v1.EnvVar{ - v1.EnvVar{ - Name: "CLONE_WAL_S3_BUCKET", - Value: c.OpConfig.WALES3Bucket, - }, - v1.EnvVar{ + { Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID), }, @@ -1448,9 +1851,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget return &policybeta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: c.podDisruptionBudgetName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.podDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: policybeta1.PodDisruptionBudgetSpec{ MinAvailable: &minAvailable, @@ -1521,7 +1925,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { annotations := c.generatePodAnnotations(&c.Spec) // re-use the method that generates DB pod templates - if podTemplate, err = generatePodTemplate( + if podTemplate, err = c.generatePodTemplate( c.Namespace, labels, annotations, @@ -1530,7 +1934,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { []v1.Container{}, &[]v1.Toleration{}, nil, - nodeAffinity(c.OpConfig.NodeReadinessLabel), + nil, + nil, + nodeAffinity(c.OpConfig.NodeReadinessLabel, nil), + nil, int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, c.OpConfig.KubeIAMRole, @@ -1539,7 +1946,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { false, "", c.OpConfig.AdditionalSecretMount, - c.OpConfig.AdditionalSecretMountPath); err != nil { + c.OpConfig.AdditionalSecretMountPath, + []acidv1.AdditionalVolume{}); err != nil { return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err) } @@ -1566,9 +1974,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { cronJob := &batchv1beta1.CronJob{ ObjectMeta: metav1.ObjectMeta{ - Name: c.getLogicalBackupJobName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.getLogicalBackupJobName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: batchv1beta1.CronJobSpec{ Schedule: schedule, @@ -1601,6 +2010,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, }, // Bucket env vars + { + Name: "LOGICAL_BACKUP_PROVIDER", + Value: c.OpConfig.LogicalBackup.LogicalBackupProvider, + }, { Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, @@ -1621,10 +2034,14 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())), }, + { + Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", + Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials, + }, // Postgres env vars { Name: "PG_VERSION", - Value: c.Spec.PgVersion, + Value: c.Spec.PostgresqlParam.PgVersion, }, { Name: "PGPORT", @@ -1663,11 +2080,47 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: 
c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey}) } - c.logger.Debugf("Generated logical backup env vars %v", envVars) + c.logger.Debugf("Generated logical backup env vars") + c.logger.Debugf("%v", envVars) return envVars } // getLogicalBackupJobName returns the name; the job itself may not exists func (c *Cluster) getLogicalBackupJobName() (jobName string) { - return "logical-backup-" + c.clusterName().Name + return c.OpConfig.LogicalBackupJobPrefix + c.clusterName().Name +} + +// Return an array of ownerReferences to make an arbitraty object dependent on +// the StatefulSet. Dependency is made on StatefulSet instead of PostgreSQL CRD +// while the former is represent the actual state, and only it's deletion means +// we delete the cluster (e.g. if CRD was deleted, StatefulSet somehow +// survived, we can't delete an object because it will affect the functioning +// cluster). +func (c *Cluster) ownerReferences() []metav1.OwnerReference { + controller := true + + if c.Statefulset == nil { + c.logger.Warning("Cannot get owner reference, no statefulset") + return []metav1.OwnerReference{} + } + + return []metav1.OwnerReference{ + { + UID: c.Statefulset.ObjectMeta.UID, + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: c.Statefulset.ObjectMeta.Name, + Controller: &controller, + }, + } +} + +func ensurePath(file string, defaultDir string, defaultFile string) string { + if file == "" { + return path.Join(defaultDir, defaultFile) + } + if !path.IsAbs(file) { + return path.Join(defaultDir, file) + } + return file } diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index e8fe05456..e880fcc3b 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1,23 +1,39 @@ package cluster import ( + "context" + "fmt" "reflect" - - v1 "k8s.io/api/core/v1" + "sort" "testing" + "github.com/stretchr/testify/assert" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" ) +// For testing purposes +type ExpectedValue struct { + envIndex int + envVarConstant string + envVarValue string +} + func toIntStr(val int) *intstr.IntOrString { b := intstr.FromInt(val) return &b @@ -33,7 +49,7 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { ReplicationUsername: replicationUserName, }, }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) testName := "TestGenerateSpiloConfig" tests := []struct { @@ -61,21 +77,23 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { "locale": "en_US.UTF-8", "data-checksums": "true", }, - PgHba: []string{"hostssl all all 0.0.0.0/0 md5", "host all all 0.0.0.0/0 md5"}, - TTL: 30, - LoopWait: 10, - RetryTimeout: 10, - MaximumLagOnFailover: 33554432, - Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}}, + PgHba: []string{"hostssl all all 0.0.0.0/0 md5", "host all all 0.0.0.0/0 md5"}, + TTL: 
30, + LoopWait: 10, + RetryTimeout: 10, + MaximumLagOnFailover: 33554432, + SynchronousMode: true, + SynchronousModeStrict: true, + Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}}, }, role: "zalandos", opConfig: config.Config{}, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/11/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/11/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}}}}`, }, } for _, tt := range tests { cluster.OpConfig = tt.opConfig - result, err := generateSpiloJSONConfiguration(tt.pgParam, tt.patroni, tt.role, logger) + result, err := generateSpiloJSONConfiguration(tt.pgParam, tt.patroni, tt.role, false, logger) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -86,6 +104,119 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { } } +func TestGenerateSpiloPodEnvVars(t *testing.T) { + var cluster = New( + Config{ + OpConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + expectedValuesGSBucket := []ExpectedValue{ + ExpectedValue{ + envIndex: 15, + envVarConstant: "WAL_GS_BUCKET", + envVarValue: "wale-gs-bucket", + }, + ExpectedValue{ + envIndex: 16, + envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: "/SomeUUID", + }, + ExpectedValue{ + envIndex: 17, + envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", + envVarValue: "", + }, + } + + expectedValuesGCPCreds := []ExpectedValue{ + ExpectedValue{ + envIndex: 15, + envVarConstant: "WAL_GS_BUCKET", + envVarValue: "wale-gs-bucket", + }, + ExpectedValue{ + envIndex: 16, + envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX", + envVarValue: "/SomeUUID", + }, + ExpectedValue{ + envIndex: 17, + envVarConstant: "WAL_BUCKET_SCOPE_PREFIX", + envVarValue: "", + }, + ExpectedValue{ + envIndex: 18, + envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS", + envVarValue: "some_path_to_credentials", + }, + } + + testName := "TestGenerateSpiloPodEnvVars" + tests := []struct { + subTest string + opConfig config.Config + uid types.UID + spiloConfig string + cloneDescription *acidv1.CloneDescription + standbyDescription *acidv1.StandbyDescription + customEnvList []v1.EnvVar + expectedValues []ExpectedValue + }{ + { + subTest: "Will set WAL_GS_BUCKET env", + opConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + }, + uid: "SomeUUID", + spiloConfig: "someConfig", + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: 
&acidv1.StandbyDescription{}, + customEnvList: []v1.EnvVar{}, + expectedValues: expectedValuesGSBucket, + }, + { + subTest: "Will set GOOGLE_APPLICATION_CREDENTIALS env", + opConfig: config.Config{ + WALGSBucket: "wale-gs-bucket", + GCPCredentials: "some_path_to_credentials", + }, + uid: "SomeUUID", + spiloConfig: "someConfig", + cloneDescription: &acidv1.CloneDescription{}, + standbyDescription: &acidv1.StandbyDescription{}, + customEnvList: []v1.EnvVar{}, + expectedValues: expectedValuesGCPCreds, + }, + } + + for _, tt := range tests { + cluster.OpConfig = tt.opConfig + + actualEnvs := cluster.generateSpiloPodEnvVars(tt.uid, tt.spiloConfig, tt.cloneDescription, tt.standbyDescription, tt.customEnvList) + + for _, ev := range tt.expectedValues { + env := actualEnvs[ev.envIndex] + + if env.Name != ev.envVarConstant { + t.Errorf("%s %s: Expected env name %s, have %s instead", + testName, tt.subTest, ev.envVarConstant, env.Name) + } + + if env.Value != ev.envVarValue { + t.Errorf("%s %s: Expected env value %s, have %s instead", + testName, tt.subTest, ev.envVarValue, env.Value) + } + } + } +} + func TestCreateLoadBalancerLogic(t *testing.T) { var cluster = New( Config{ @@ -96,7 +227,7 @@ func TestCreateLoadBalancerLogic(t *testing.T) { ReplicationUsername: replicationUserName, }, }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) testName := "TestCreateLoadBalancerLogic" tests := []struct { @@ -158,7 +289,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger), + logger, + eventRecorder), policyv1beta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: "postgres-myapp-database-pdb", @@ -181,7 +313,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, - logger), + logger, + eventRecorder), policyv1beta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: "postgres-myapp-database-pdb", @@ -204,7 +337,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger), + logger, + eventRecorder), policyv1beta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: "postgres-myapp-database-pdb", @@ -227,7 +361,8 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, - logger), + logger, + eventRecorder), policyv1beta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ Name: "postgres-myapp-database-databass-budget", @@ -362,7 +497,7 @@ func TestCloneEnv(t *testing.T) { ReplicationUsername: replicationUserName, }, }, - }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) for _, tt := range tests { envs := cluster.generateCloneEnvironment(tt.cloneOpts) @@ -381,6 +516,46 @@ func TestCloneEnv(t *testing.T) { } } +func TestExtractPgVersionFromBinPath(t *testing.T) { + testName := "TestExtractPgVersionFromBinPath" + tests := []struct { + subTest string + binPath 
string + template string + expected string + }{ + { + subTest: "test current bin path with decimal against hard coded template", + binPath: "/usr/lib/postgresql/9.6/bin", + template: pgBinariesLocationTemplate, + expected: "9.6", + }, + { + subTest: "test current bin path against hard coded template", + binPath: "/usr/lib/postgresql/12/bin", + template: pgBinariesLocationTemplate, + expected: "12", + }, + { + subTest: "test alternative bin path against a matching template", + binPath: "/usr/pgsql-12/bin", + template: "/usr/pgsql-%v/bin", + expected: "12", + }, + } + + for _, tt := range tests { + pgVersion, err := extractPgVersionFromBinPath(tt.binPath, tt.template) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if pgVersion != tt.expected { + t.Errorf("%s %s: Expected version %s, have %s instead", + testName, tt.subTest, tt.expected, pgVersion) + } + } +} + func TestSecretVolume(t *testing.T) { testName := "TestSecretVolume" tests := []struct { @@ -451,3 +626,866 @@ func TestSecretVolume(t *testing.T) { } } } + +const ( + testPodEnvironmentConfigMapName = "pod_env_cm" + testPodEnvironmentSecretName = "pod_env_sc" +) + +type mockSecret struct { + v1core.SecretInterface +} + +type mockConfigMap struct { + v1core.ConfigMapInterface +} + +func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { + if name != testPodEnvironmentSecretName { + return nil, fmt.Errorf("Secret PodEnvironmentSecret not found") + } + secret := &v1.Secret{} + secret.Name = testPodEnvironmentSecretName + secret.Data = map[string][]byte{ + "minio_access_key": []byte("alpha"), + "minio_secret_key": []byte("beta"), + } + return secret, nil +} + +func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { + if name != testPodEnvironmentConfigMapName { + return nil, fmt.Errorf("NotFound") + } + configmap := &v1.ConfigMap{} + configmap.Name = testPodEnvironmentConfigMapName + configmap.Data = map[string]string{ + "foo1": "bar1", + "foo2": "bar2", + } + return configmap, nil +} + +type MockSecretGetter struct { +} + +type MockConfigMapsGetter struct { +} + +func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { + return &mockSecret{} +} + +func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { + return &mockConfigMap{} +} + +func newMockKubernetesClient() k8sutil.KubernetesClient { + return k8sutil.KubernetesClient{ + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + } +} +func newMockCluster(opConfig config.Config) *Cluster { + cluster := &Cluster{ + Config: Config{OpConfig: opConfig}, + KubeClient: newMockKubernetesClient(), + } + return cluster +} + +func TestPodEnvironmentConfigMapVariables(t *testing.T) { + testName := "TestPodEnvironmentConfigMapVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "no PodEnvironmentConfigMap", + envVars: []v1.EnvVar{}, + }, + { + subTest: "missing PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: "idonotexist", + }, + }, + }, + err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"), + }, + { + subTest: "simple PodEnvironmentConfigMap", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentConfigMap: spec.NamespacedName{ + Name: testPodEnvironmentConfigMapName, + }, + }, + }, + envVars: 
[]v1.EnvVar{ + { + Name: "foo1", + Value: "bar1", + }, + { + Name: "foo2", + Value: "bar2", + }, + }, + }, + } + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentConfigMapVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } +} + +// Test if the keys of an existing secret are properly referenced +func TestPodEnvironmentSecretVariables(t *testing.T) { + testName := "TestPodEnvironmentSecretVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "No PodEnvironmentSecret configured", + envVars: []v1.EnvVar{}, + }, + { + subTest: "Secret referenced by PodEnvironmentSecret does not exist", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: "idonotexist", + }, + }, + err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret not found"), + }, + { + subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret", + opConfig: config.Config{ + Resources: config.Resources{ + PodEnvironmentSecret: testPodEnvironmentSecretName, + }, + }, + envVars: []v1.EnvVar{ + { + Name: "minio_access_key", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "minio_access_key", + }, + }, + }, + { + Name: "minio_secret_key", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "minio_secret_key", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getPodEnvironmentSecretVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } + +} + +func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { + required := map[string]bool{ + "PGHOST": false, + "PGPORT": false, + "PGUSER": false, + "PGSCHEMA": false, + "PGPASSWORD": false, + "CONNECTION_POOLER_MODE": false, + "CONNECTION_POOLER_PORT": false, + } + + envs := podSpec.Spec.Containers[0].Env + for _, env := range envs { + required[env.Name] = true + } + + for env, value := range required { + if !value { + return fmt.Errorf("Environment variable %s is not present", env) + } + } + + return nil +} + +func TestNodeAffinity(t *testing.T) { + var err error + var spec acidv1.PostgresSpec + var cluster *Cluster + var spiloRunAsUser = int64(101) + var spiloRunAsGroup = int64(103) + var spiloFSGroup = int64(103) + + makeSpec := func(nodeAffinity 
*v1.NodeAffinity) acidv1.PostgresSpec { + return acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + NodeAffinity: nodeAffinity, + } + } + + cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + SpiloRunAsUser: &spiloRunAsUser, + SpiloRunAsGroup: &spiloRunAsGroup, + SpiloFSGroup: &spiloFSGroup, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + nodeAff := &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "test-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{ + "test-value", + }, + }, + }, + }, + }, + }, + } + spec = makeSpec(nodeAff) + s, err := cluster.generateStatefulSet(&spec) + if err != nil { + assert.NoError(t, err) + } + + assert.NotNil(t, s.Spec.Template.Spec.Affinity.NodeAffinity, "node affinity in statefulset shouldn't be nil") + assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity") +} + +func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { + if podSpec.ObjectMeta.Name != "test-pod-template" { + return fmt.Errorf("Custom pod template is not used, current spec %+v", + podSpec) + } + + return nil +} + +func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error { + owner := deployment.ObjectMeta.OwnerReferences[0] + + if owner.Name != cluster.Statefulset.ObjectMeta.Name { + return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", + owner.Name, cluster.Statefulset.ObjectMeta.Name) + } + + return nil +} + +func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { + owner := service.ObjectMeta.OwnerReferences[0] + + if owner.Name != cluster.Statefulset.ObjectMeta.Name { + return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", + owner.Name, cluster.Statefulset.ObjectMeta.Name) + } + + return nil +} + +func TestTLS(t *testing.T) { + var err error + var spec acidv1.PostgresSpec + var cluster *Cluster + var spiloRunAsUser = int64(101) + var spiloRunAsGroup = int64(103) + var spiloFSGroup = int64(103) + var additionalVolumes = spec.AdditionalVolumes + + makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec { + return acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + TLS: &tls, + } + } + + cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + SpiloRunAsUser: &spiloRunAsUser, + SpiloRunAsGroup: &spiloRunAsGroup, + SpiloFSGroup: &spiloFSGroup, + }, + }, + }, k8sutil.KubernetesClient{}, 
acidv1.Postgresql{}, logger, eventRecorder) + spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"}) + s, err := cluster.generateStatefulSet(&spec) + if err != nil { + assert.NoError(t, err) + } + + fsGroup := int64(103) + assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned") + + defaultMode := int32(0640) + mountPath := "/tls" + additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{ + Name: spec.TLS.SecretName, + MountPath: mountPath, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: spec.TLS.SecretName, + DefaultMode: &defaultMode, + }, + }, + }) + + volume := v1.Volume{ + Name: "my-secret", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + DefaultMode: &defaultMode, + }, + }, + } + assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume") + + assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ + MountPath: "/tls", + Name: "my-secret", + }, "the volume gets mounted in /tls") + + assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"}) + assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"}) + assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"}) +} + +func TestAdditionalVolume(t *testing.T) { + testName := "TestAdditionalVolume" + tests := []struct { + subTest string + podSpec *v1.PodSpec + volumePos int + }{ + { + subTest: "empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{}, + Containers: []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{}, + }, + }, + }, + volumePos: 0, + }, + { + subTest: "non empty PodSpec", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{{}}, + Containers: []v1.Container{ + { + Name: "postgres", + VolumeMounts: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + }, + }, + }, + }, + }, + volumePos: 1, + }, + { + subTest: "non empty PodSpec with sidecar", + podSpec: &v1.PodSpec{ + Volumes: []v1.Volume{{}}, + Containers: []v1.Container{ + { + Name: "postgres", + VolumeMounts: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + }, + }, + }, + { + Name: "sidecar", + VolumeMounts: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + }, + }, + }, + }, + }, + volumePos: 1, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + for _, tt := range tests { + // Test with additional volume mounted in all containers + additionalVolumeMount := []acidv1.AdditionalVolume{ + { + Name: "test", + MountPath: "/test", + TargetContainers: []string{"all"}, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + } + + numMounts := len(tt.podSpec.Containers[0].VolumeMounts) + + cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount) + volumeName := tt.podSpec.Volumes[tt.volumePos].Name + + if volumeName != additionalVolumeMount[0].Name { + t.Errorf("%s %s: Expected volume %v was not created, have %s instead", + testName, tt.subTest, additionalVolumeMount, volumeName) + } + + for i := range 
tt.podSpec.Containers { + volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.volumePos].Name + + if volumeMountName != additionalVolumeMount[0].Name { + t.Errorf("%s %s: Expected mount %v was not created, have %s instead", + testName, tt.subTest, additionalVolumeMount, volumeMountName) + } + + } + + numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts) + + if numMountsCheck != numMounts+1 { + t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v", + numMountsCheck, numMounts+1) + } + } + + for _, tt := range tests { + // Test with additional volume mounted only in first container + additionalVolumeMount := []acidv1.AdditionalVolume{ + { + Name: "test", + MountPath: "/test", + TargetContainers: []string{"postgres"}, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + } + + numMounts := len(tt.podSpec.Containers[0].VolumeMounts) + + cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount) + volumeName := tt.podSpec.Volumes[tt.volumePos].Name + + if volumeName != additionalVolumeMount[0].Name { + t.Errorf("%s %s: Expected volume %v was not created, have %s instead", + testName, tt.subTest, additionalVolumeMount, volumeName) + } + + for _, container := range tt.podSpec.Containers { + if container.Name == "postgres" { + volumeMountName := container.VolumeMounts[tt.volumePos].Name + + if volumeMountName != additionalVolumeMount[0].Name { + t.Errorf("%s %s: Expected mount %v was not created, have %s instead", + testName, tt.subTest, additionalVolumeMount, volumeMountName) + } + + numMountsCheck := len(container.VolumeMounts) + if numMountsCheck != numMounts+1 { + t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v", + numMountsCheck, numMounts+1) + } + } else { + numMountsCheck := len(container.VolumeMounts) + if numMountsCheck == numMounts+1 { + t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v", + numMountsCheck, numMounts) + } + } + } + } +} + +// inject sidecars through all available mechanisms and check the resulting container specs +func TestSidecars(t *testing.T) { + var err error + var spec acidv1.PostgresSpec + var cluster *Cluster + + generateKubernetesResources := func(cpuRequest string, cpuLimit string, memoryRequest string, memoryLimit string) v1.ResourceRequirements { + parsedCPURequest, err := resource.ParseQuantity(cpuRequest) + assert.NoError(t, err) + parsedCPULimit, err := resource.ParseQuantity(cpuLimit) + assert.NoError(t, err) + parsedMemoryRequest, err := resource.ParseQuantity(memoryRequest) + assert.NoError(t, err) + parsedMemoryLimit, err := resource.ParseQuantity(memoryLimit) + assert.NoError(t, err) + return v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: parsedCPURequest, + v1.ResourceMemory: parsedMemoryRequest, + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: parsedCPULimit, + v1.ResourceMemory: parsedMemoryLimit, + }, + } + } + + spec = acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + Sidecars: []acidv1.Sidecar{ + acidv1.Sidecar{ + Name: "cluster-specific-sidecar", + }, + acidv1.Sidecar{ + Name: "cluster-specific-sidecar-with-resources", + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: 
"1.4Gi"}, + }, + }, + acidv1.Sidecar{ + Name: "replace-sidecar", + DockerImage: "overwrite-image", + }, + }, + } + + cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + DefaultCPURequest: "200m", + DefaultCPULimit: "500m", + DefaultMemoryRequest: "0.7Gi", + DefaultMemoryLimit: "1.3Gi", + }, + SidecarImages: map[string]string{ + "deprecated-global-sidecar": "image:123", + }, + SidecarContainers: []v1.Container{ + v1.Container{ + Name: "global-sidecar", + }, + // will be replaced by a cluster specific sidecar with the same name + v1.Container{ + Name: "replace-sidecar", + Image: "replaced-image", + }, + }, + Scalyr: config.Scalyr{ + ScalyrAPIKey: "abc", + ScalyrImage: "scalyr-image", + ScalyrCPURequest: "220m", + ScalyrCPULimit: "520m", + ScalyrMemoryRequest: "0.9Gi", + // ise default memory limit + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + s, err := cluster.generateStatefulSet(&spec) + assert.NoError(t, err) + + env := []v1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POSTGRES_USER", + Value: superUserName, + }, + { + Name: "POSTGRES_PASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "", + }, + Key: "password", + }, + }, + }, + } + mounts := []v1.VolumeMount{ + v1.VolumeMount{ + Name: "pgdata", + MountPath: "/home/postgres/pgdata", + }, + } + + // deduplicated sidecars and Patroni + assert.Equal(t, 7, len(s.Spec.Template.Spec.Containers), "wrong number of containers") + + // cluster specific sidecar + assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ + Name: "cluster-specific-sidecar", + Env: env, + Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"), + ImagePullPolicy: v1.PullIfNotPresent, + VolumeMounts: mounts, + }) + + // container specific resources + expectedResources := generateKubernetesResources("210m", "510m", "0.8Gi", "1.4Gi") + assert.Equal(t, expectedResources.Requests[v1.ResourceCPU], s.Spec.Template.Spec.Containers[2].Resources.Requests[v1.ResourceCPU]) + assert.Equal(t, expectedResources.Limits[v1.ResourceCPU], s.Spec.Template.Spec.Containers[2].Resources.Limits[v1.ResourceCPU]) + assert.Equal(t, expectedResources.Requests[v1.ResourceMemory], s.Spec.Template.Spec.Containers[2].Resources.Requests[v1.ResourceMemory]) + assert.Equal(t, expectedResources.Limits[v1.ResourceMemory], s.Spec.Template.Spec.Containers[2].Resources.Limits[v1.ResourceMemory]) + + // deprecated global sidecar + assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ + Name: "deprecated-global-sidecar", + Image: "image:123", + Env: env, + Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"), + ImagePullPolicy: v1.PullIfNotPresent, + VolumeMounts: mounts, + }) + + // global sidecar + assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ + Name: "global-sidecar", + Env: env, + VolumeMounts: mounts, + }) + + // replaced sidecar + assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ + 
Name: "replace-sidecar", + Image: "overwrite-image", + Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"), + ImagePullPolicy: v1.PullIfNotPresent, + Env: env, + VolumeMounts: mounts, + }) + + // scalyr sidecar + // the order in env is important + scalyrEnv := append(env, v1.EnvVar{Name: "SCALYR_API_KEY", Value: "abc"}, v1.EnvVar{Name: "SCALYR_SERVER_HOST", Value: ""}) + assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ + Name: "scalyr-sidecar", + Image: "scalyr-image", + Resources: generateKubernetesResources("220m", "520m", "0.9Gi", "1.3Gi"), + ImagePullPolicy: v1.PullIfNotPresent, + Env: scalyrEnv, + VolumeMounts: mounts, + }) + +} + +func TestGenerateService(t *testing.T) { + var spec acidv1.PostgresSpec + var cluster *Cluster + var enableLB bool = true + spec = acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + Sidecars: []acidv1.Sidecar{ + acidv1.Sidecar{ + Name: "cluster-specific-sidecar", + }, + acidv1.Sidecar{ + Name: "cluster-specific-sidecar-with-resources", + Resources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"}, + }, + }, + acidv1.Sidecar{ + Name: "replace-sidecar", + DockerImage: "overwrite-image", + }, + }, + EnableMasterLoadBalancer: &enableLB, + } + + cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + DefaultCPURequest: "200m", + DefaultCPULimit: "500m", + DefaultMemoryRequest: "0.7Gi", + DefaultMemoryLimit: "1.3Gi", + }, + SidecarImages: map[string]string{ + "deprecated-global-sidecar": "image:123", + }, + SidecarContainers: []v1.Container{ + v1.Container{ + Name: "global-sidecar", + }, + // will be replaced by a cluster specific sidecar with the same name + v1.Container{ + Name: "replace-sidecar", + Image: "replaced-image", + }, + }, + Scalyr: config.Scalyr{ + ScalyrAPIKey: "abc", + ScalyrImage: "scalyr-image", + ScalyrCPURequest: "220m", + ScalyrCPULimit: "520m", + ScalyrMemoryRequest: "0.9Gi", + // use default memory limit + }, + ExternalTrafficPolicy: "Cluster", + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + service := cluster.generateService(Master, &spec) + assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy) + cluster.OpConfig.ExternalTrafficPolicy = "Local" + service = cluster.generateService(Master, &spec) + assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy) + +} diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index 095f859f0..cf43de9a7 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -1,8 +1,10 @@ package cluster import ( + "context" "fmt" "math/rand" + "time" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -10,6 +12,7 @@ import ( "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/retryutil" ) func (c *Cluster) listPods() ([]v1.Pod, error) { @@ -17,7 +20,7 @@ func (c *Cluster) listPods() ([]v1.Pod, error) { LabelSelector: 
c.labelsSet(false).String(), } - pods, err := c.KubeClient.Pods(c.Namespace).List(listOptions) + pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions) if err != nil { return nil, fmt.Errorf("could not get list of pods: %v", err) } @@ -30,7 +33,7 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) { LabelSelector: c.roleLabelsSet(false, role).String(), } - pods, err := c.KubeClient.Pods(c.Namespace).List(listOptions) + pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions) if err != nil { return nil, fmt.Errorf("could not get list of pods: %v", err) } @@ -73,7 +76,7 @@ func (c *Cluster) deletePod(podName spec.NamespacedName) error { ch := c.registerPodSubscriber(podName) defer c.unregisterPodSubscriber(podName) - if err := c.KubeClient.Pods(podName.Namespace).Delete(podName.Name, c.deleteOptions); err != nil { + if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil { return err } @@ -183,7 +186,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { eol bool ) - oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{}) + oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("could not get pod: %v", err) @@ -206,7 +209,9 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { // we must have a statefulset in the cluster for the migration to work if c.Statefulset == nil { var sset *appsv1.StatefulSet - if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), + if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get( + context.TODO(), + c.statefulSetName(), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not retrieve cluster statefulset: %v", err) } @@ -247,7 +252,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { // MigrateReplicaPod recreates pod on a new node func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName string) error { - replicaPod, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{}) + replicaPod, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("could not get pod: %v", err) } @@ -276,7 +281,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) { defer c.unregisterPodSubscriber(podName) stopChan := make(chan struct{}) - if err := c.KubeClient.Pods(podName.Namespace).Delete(podName.Name, c.deleteOptions); err != nil { + if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil { return nil, fmt.Errorf("could not delete pod: %v", err) } @@ -291,6 +296,50 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) { return pod, nil } +func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool { + + /* + Operator should not re-create pods if there is at least one replica being bootstrapped + because Patroni might use other replicas to take basebackup from (see Patroni's "clonefrom" tag). 
+ + XXX operator cannot forbid replica re-init, so we might still fail if re-init is started + after this check succeeds but before a pod is re-created + */ + + for _, pod := range pods.Items { + c.logger.Debugf("name=%s phase=%s ip=%s", pod.Name, pod.Status.Phase, pod.Status.PodIP) + } + + for _, pod := range pods.Items { + + var state string + + err := retryutil.Retry(1*time.Second, 5*time.Second, + func() (bool, error) { + + var err error + + state, err = c.patroni.GetPatroniMemberState(&pod) + + if err != nil { + return false, err + } + return true, nil + }, + ) + + if err != nil { + c.logger.Errorf("failed to get Patroni state for pod: %s", err) + return false + } else if state == "creating replica" { + c.logger.Warningf("cannot re-create replica %s: it is currently being initialized", pod.Name) + return false + } + + } + return true +} + func (c *Cluster) recreatePods() error { c.setProcessName("starting to recreate pods") ls := c.labelsSet(false) @@ -300,12 +349,16 @@ func (c *Cluster) recreatePods() error { LabelSelector: ls.String(), } - pods, err := c.KubeClient.Pods(namespace).List(listOptions) + pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions) if err != nil { return fmt.Errorf("could not get the list of pods: %v", err) } c.logger.Infof("there are %d pods in the cluster to recreate", len(pods.Items)) + if !c.isSafeToRecreatePods(pods) { + return fmt.Errorf("postpone pod recreation until next Sync: recreation is unsafe because pods are being initialized") + } + var ( masterPod, newMasterPod, newPod *v1.Pod ) @@ -349,7 +402,7 @@ func (c *Cluster) recreatePods() error { } func (c *Cluster) podIsEndOfLife(pod *v1.Pod) (bool, error) { - node, err := c.KubeClient.Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := c.KubeClient.Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index d6c2149bf..bcc568adc 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -1,6 +1,7 @@ package cluster import ( + "context" "fmt" "strconv" "strings" @@ -80,7 +81,10 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { if err != nil { return nil, fmt.Errorf("could not generate statefulset: %v", err) } - statefulSet, err := c.KubeClient.StatefulSets(statefulSetSpec.Namespace).Create(statefulSetSpec) + statefulSet, err := c.KubeClient.StatefulSets(statefulSetSpec.Namespace).Create( + context.TODO(), + statefulSetSpec, + metav1.CreateOptions{}) if err != nil { return nil, err } @@ -125,7 +129,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { } podName := fmt.Sprintf("%s-0", c.Statefulset.Name) - masterCandidatePod, err := c.KubeClient.Pods(c.clusterNamespace()).Get(podName, metav1.GetOptions{}) + masterCandidatePod, err := c.KubeClient.Pods(c.clusterNamespace()).Get(context.TODO(), podName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("could not get master candidate pod: %v", err) } @@ -145,7 +149,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { // setRollingUpdateFlagForStatefulSet sets the indicator or the rolling update requirement // in the StatefulSet annotation. 
-func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool) { +func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) { anno := sset.GetAnnotations() if anno == nil { anno = make(map[string]string) @@ -153,13 +157,13 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, v anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val) sset.SetAnnotations(anno) - c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val) + c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg) } // applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet // and applies that setting to the actual running cluster. func (c *Cluster) applyRollingUpdateFlagforStatefulSet(val bool) error { - c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val) + c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val, "applyRollingUpdateFlag") sset, err := c.updateStatefulSetAnnotations(c.Statefulset.GetAnnotations()) if err != nil { return err @@ -211,22 +215,24 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *appsv1.St podsRollingUpdateRequired = false } else { c.logger.Infof("found a statefulset with an unfinished rolling update of the pods") - } } return podsRollingUpdateRequired } func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) { - c.logger.Debugf("updating statefulset annotations") + c.logger.Debugf("patching statefulset annotations") patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err) } result, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch( + context.TODO(), c.Statefulset.Name, types.MergePatchType, - []byte(patchData), "") + []byte(patchData), + metav1.PatchOptions{}, + "") if err != nil { return nil, fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err) } @@ -254,9 +260,12 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error { } statefulSet, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch( + context.TODO(), c.Statefulset.Name, types.MergePatchType, - patchData, "") + patchData, + metav1.PatchOptions{}, + "") if err != nil { return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err) } @@ -288,7 +297,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { oldStatefulset := c.Statefulset options := metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy} - err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(oldStatefulset.Name, &options) + err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(context.TODO(), oldStatefulset.Name, options) if err != nil { return fmt.Errorf("could not delete statefulset %q: %v", statefulSetName, err) } @@ -299,7 +308,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, func() (bool, error) { - _, err2 := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Get(oldStatefulset.Name, metav1.GetOptions{}) + _, err2 := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Get(context.TODO(), oldStatefulset.Name, metav1.GetOptions{}) if err2 == nil { return false, nil } @@ -313,7 +322,7 @@ func (c *Cluster) 
replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { } // create the new statefulset with the desired spec. It would take over the remaining pods. - createdStatefulset, err := c.KubeClient.StatefulSets(newStatefulSet.Namespace).Create(newStatefulSet) + createdStatefulset, err := c.KubeClient.StatefulSets(newStatefulSet.Namespace).Create(context.TODO(), newStatefulSet, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("could not create statefulset %q: %v", statefulSetName, err) } @@ -334,7 +343,7 @@ func (c *Cluster) deleteStatefulSet() error { return fmt.Errorf("there is no statefulset in the cluster") } - err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(c.Statefulset.Name, c.deleteOptions) + err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(context.TODO(), c.Statefulset.Name, c.deleteOptions) if err != nil { return err } @@ -356,7 +365,7 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) { c.setProcessName("creating %v service", role) serviceSpec := c.generateService(role, &c.Spec) - service, err := c.KubeClient.Services(serviceSpec.Namespace).Create(serviceSpec) + service, err := c.KubeClient.Services(serviceSpec.Namespace).Create(context.TODO(), serviceSpec, metav1.CreateOptions{}) if err != nil { return nil, err } @@ -383,9 +392,12 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error if len(newService.ObjectMeta.Annotations) > 0 { if annotationsPatchData, err := metaAnnotationsPatch(newService.ObjectMeta.Annotations); err == nil { _, err = c.KubeClient.Services(serviceName.Namespace).Patch( + context.TODO(), serviceName.Name, types.MergePatchType, - []byte(annotationsPatchData), "") + []byte(annotationsPatchData), + metav1.PatchOptions{}, + "") if err != nil { return fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err) @@ -402,7 +414,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error if newServiceType == "ClusterIP" && newServiceType != oldServiceType { newService.ResourceVersion = c.Services[role].ResourceVersion newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP - svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService) + svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("could not update service %q: %v", serviceName, err) } @@ -413,9 +425,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error } svc, err = c.KubeClient.Services(serviceName.Namespace).Patch( - serviceName.Name, - types.MergePatchType, - patchData, "") + context.TODO(), serviceName.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "") if err != nil { return fmt.Errorf("could not patch service %q: %v", serviceName, err) } @@ -434,7 +444,7 @@ func (c *Cluster) deleteService(role PostgresRole) error { return nil } - if err := c.KubeClient.Services(service.Namespace).Delete(service.Name, c.deleteOptions); err != nil { + if err := c.KubeClient.Services(service.Namespace).Delete(context.TODO(), service.Name, c.deleteOptions); err != nil { return err } @@ -458,7 +468,7 @@ func (c *Cluster) createEndpoint(role PostgresRole) (*v1.Endpoints, error) { } endpointsSpec := c.generateEndpoint(role, subsets) - endpoints, err := c.KubeClient.Endpoints(endpointsSpec.Namespace).Create(endpointsSpec) + endpoints, err := c.KubeClient.Endpoints(endpointsSpec.Namespace).Create(context.TODO(), endpointsSpec, 
metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("could not create %s endpoint: %v", role, err) } @@ -500,7 +510,7 @@ func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget, podDisruptionBudgetSpec := c.generatePodDisruptionBudget() podDisruptionBudget, err := c.KubeClient. PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). - Create(podDisruptionBudgetSpec) + Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) if err != nil { return nil, err @@ -521,7 +531,7 @@ func (c *Cluster) updatePodDisruptionBudget(pdb *policybeta1.PodDisruptionBudget newPdb, err := c.KubeClient. PodDisruptionBudgets(pdb.Namespace). - Create(pdb) + Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("could not create pod disruption budget: %v", err) } @@ -539,7 +549,7 @@ func (c *Cluster) deletePodDisruptionBudget() error { pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta) err := c.KubeClient. PodDisruptionBudgets(c.PodDisruptionBudget.Namespace). - Delete(c.PodDisruptionBudget.Name, c.deleteOptions) + Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions) if err != nil { return fmt.Errorf("could not delete pod disruption budget: %v", err) } @@ -548,7 +558,7 @@ func (c *Cluster) deletePodDisruptionBudget() error { err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, func() (bool, error) { - _, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(pdbName.Name, metav1.GetOptions{}) + _, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{}) if err2 == nil { return false, nil } @@ -571,7 +581,8 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error { return fmt.Errorf("there is no %s endpoint in the cluster", role) } - if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(c.Endpoints[role].Name, c.deleteOptions); err != nil { + if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete( + context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil { return fmt.Errorf("could not delete endpoint: %v", err) } @@ -582,17 +593,37 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error { return nil } -func (c *Cluster) deleteSecret(secret *v1.Secret) error { - c.setProcessName("deleting secret %q", util.NameFromMeta(secret.ObjectMeta)) - c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta)) - err := c.KubeClient.Secrets(secret.Namespace).Delete(secret.Name, c.deleteOptions) - if err != nil { - return err +func (c *Cluster) deleteSecrets() error { + c.setProcessName("deleting secrets") + var errors []string + errorCount := 0 + for uid, secret := range c.Secrets { + err := c.deleteSecret(uid, *secret) + if err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + errorCount++ + } } - c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta)) - delete(c.Secrets, secret.UID) - return err + if errorCount > 0 { + return fmt.Errorf("could not delete all secrets: %v", errors) + } + + return nil +} + +func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error { + c.setProcessName("deleting secret") + secretName := util.NameFromMeta(secret.ObjectMeta) + c.logger.Debugf("deleting secret %q", secretName) + err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions) + if err != nil { + return fmt.Errorf("could not delete secret %q: %v", secretName, err) + } + 
c.logger.Infof("secret %q has been deleted", secretName) + c.Secrets[uid] = nil + + return nil } func (c *Cluster) createRoles() (err error) { @@ -610,7 +641,7 @@ func (c *Cluster) createLogicalBackupJob() (err error) { } c.logger.Debugf("Generated cronJobSpec: %v", logicalBackupJobSpec) - _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(logicalBackupJobSpec) + _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("could not create k8s cron job: %v", err) } @@ -628,9 +659,12 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1beta1.CronJob) error { // update the backup job spec _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch( + context.TODO(), c.getLogicalBackupJobName(), types.MergePatchType, - patchData, "") + patchData, + metav1.PatchOptions{}, + "") if err != nil { return fmt.Errorf("could not patch logical backup job: %v", err) } @@ -642,7 +676,7 @@ func (c *Cluster) deleteLogicalBackupJob() error { c.logger.Info("removing the logical backup job") - return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(c.getLogicalBackupJobName(), c.deleteOptions) + return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions) } // GetServiceMaster returns cluster's kubernetes master Service diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 053db9ff7..5c0f6ce84 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -1,19 +1,21 @@ package cluster import ( + "context" "fmt" - - batchv1beta1 "k8s.io/api/batch/v1beta1" - v1 "k8s.io/api/core/v1" - policybeta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "regexp" + "strings" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" - "github.com/zalando/postgres-operator/pkg/util/volumes" + appsv1 "k8s.io/api/apps/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Sync syncs the cluster, making sure the actual Kubernetes objects correspond to what is defined in the manifest. @@ -23,14 +25,15 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { c.mu.Lock() defer c.mu.Unlock() + oldSpec := c.Postgresql c.setSpec(newSpec) defer func() { if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - c.setStatus(acidv1.ClusterStatusSyncFailed) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed) } else if !c.Status.Running() { - c.setStatus(acidv1.ClusterStatusRunning) + c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) } }() @@ -39,29 +42,25 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return err } - c.logger.Debugf("syncing secrets") - //TODO: mind the secrets of the deleted/new users if err = c.syncSecrets(); err != nil { err = fmt.Errorf("could not sync secrets: %v", err) return err } - c.logger.Debugf("syncing services") if err = c.syncServices(); err != nil { err = fmt.Errorf("could not sync services: %v", err) return err } - // potentially enlarge volumes before changing the statefulset. 
By doing that - // in this order we make sure the operator is not stuck waiting for a pod that - // cannot start because it ran out of disk space. - // TODO: handle the case of the cluster that is downsized and enlarged again - // (there will be a volume from the old pod for which we can't act before the - // the statefulset modification is concluded) - c.logger.Debugf("syncing persistent volumes") + if c.OpConfig.EnableEBSGp3Migration { + err = c.executeEBSMigration() + if nil != err { + return err + } + } + if err = c.syncVolumes(); err != nil { - err = fmt.Errorf("could not sync persistent volumes: %v", err) return err } @@ -106,6 +105,16 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { err = fmt.Errorf("could not sync databases: %v", err) return err } + c.logger.Debugf("syncing prepared databases with schemas") + if err = c.syncPreparedDatabases(); err != nil { + err = fmt.Errorf("could not sync prepared database: %v", err) + return err + } + } + + // sync connection pooler + if _, err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil { + return fmt.Errorf("could not sync connection pooler: %v", err) } return err @@ -115,10 +124,11 @@ func (c *Cluster) syncServices() error { for _, role := range []PostgresRole{Master, Replica} { c.logger.Debugf("syncing %s service", role) - if err := c.syncEndpoint(role); err != nil { - return fmt.Errorf("could not sync %s endpoint: %v", role, err) + if !c.patroniKubernetesUseConfigMaps() { + if err := c.syncEndpoint(role); err != nil { + return fmt.Errorf("could not sync %s endpoint: %v", role, err) + } } - if err := c.syncService(role); err != nil { return fmt.Errorf("could not sync %s service: %v", role, err) } @@ -134,7 +144,7 @@ func (c *Cluster) syncService(role PostgresRole) error { ) c.setProcessName("syncing %s service", role) - if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err == nil { + if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { c.Services[role] = svc desiredSvc := c.generateService(role, &c.Spec) if match, reason := k8sutil.SameService(svc, desiredSvc); !match { @@ -160,7 +170,7 @@ func (c *Cluster) syncService(role PostgresRole) error { return fmt.Errorf("could not create missing %s service: %v", role, err) } c.logger.Infof("%s service %q already exists", role, util.NameFromMeta(svc.ObjectMeta)) - if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err != nil { + if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %s service: %v", role, err) } } @@ -175,7 +185,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { ) c.setProcessName("syncing %s endpoint", role) - if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err == nil { + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err == nil { // TODO: No syncing of endpoints here, is this covered completely by updateService? 
c.Endpoints[role] = ep return nil @@ -194,7 +204,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return fmt.Errorf("could not create missing %s endpoint: %v", role, err) } c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) - if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err != nil { + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err) } } @@ -207,7 +217,7 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { pdb *policybeta1.PodDisruptionBudget err error ) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { c.PodDisruptionBudget = pdb newPDB := c.generatePodDisruptionBudget() if match, reason := k8sutil.SamePDB(pdb, newPDB); !match { @@ -233,7 +243,7 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { return fmt.Errorf("could not create pod disruption budget: %v", err) } c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) } } @@ -244,12 +254,34 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { return nil } +func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) { + + pods, err := c.listPods() + if err != nil { + return false, fmt.Errorf("could not list pods of the statefulset: %v", err) + } + + for _, pod := range pods { + + effectivePodImage := pod.Spec.Containers[0].Image + ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image + + if ssImage != effectivePodImage { + c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now") + return true, nil + } + + } + + return false, nil +} + func (c *Cluster) syncStatefulSet() error { var ( podsRollingUpdateRequired bool ) // NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early. 
- sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{}) + sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{}) if err != nil { if !k8sutil.ResourceNotFound(err) { return fmt.Errorf("could not get statefulset: %v", err) @@ -289,13 +321,13 @@ func (c *Cluster) syncStatefulSet() error { if err != nil { return fmt.Errorf("could not generate statefulset: %v", err) } - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "from cache") cmp := c.compareStatefulSetWith(desiredSS) if !cmp.match { if cmp.rollingUpdate && !podsRollingUpdateRequired { podsRollingUpdateRequired = true - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "statefulset changes") } c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons) @@ -310,6 +342,21 @@ func (c *Cluster) syncStatefulSet() error { } } } + + c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations))) + + if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade { + // even if desired and actual statefulsets match + // there still may be not up-to-date pods on condition + // (a) the lazy update was just disabled + // and + // (b) some of the pods were not restarted when the lazy update was still in place + podsRollingUpdateRequired, err = c.mustUpdatePodsAfterLazyUpdate(desiredSS) + if err != nil { + return fmt.Errorf("could not list pods of the statefulset: %v", err) + } + } + } // Apply special PostgreSQL parameters that can only be set via the Patroni API. 
@@ -323,10 +370,12 @@ func (c *Cluster) syncStatefulSet() error { // statefulset or those that got their configuration from the outdated statefulset) if podsRollingUpdateRequired { c.logger.Debugln("performing rolling update") + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update") if err := c.recreatePods(); err != nil { return fmt.Errorf("could not recreate pods: %v", err) } c.logger.Infof("pods have been recreated") + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated") if err := c.applyRollingUpdateFlagforStatefulSet(false); err != nil { c.logger.Warningf("could not clear rolling update for the statefulset: %v", err) } @@ -334,6 +383,38 @@ func (c *Cluster) syncStatefulSet() error { return nil } +// AnnotationsToPropagate get the annotations to update if required +// based on the annotations in postgres CRD +func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string { + + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + if pgCRDAnnotations != nil { + for _, anno := range c.OpConfig.DownscalerAnnotations { + for k, v := range pgCRDAnnotations { + matched, err := regexp.MatchString(anno, k) + if err != nil { + c.logger.Errorf("annotations matching issue: %v", err) + return nil + } + if matched { + annotations[k] = v + } + } + } + } + + if len(annotations) > 0 { + return annotations + } + + return nil +} + // checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters // (like max_connections) has changed and if necessary sets it via the Patroni API func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error { @@ -382,25 +463,27 @@ func (c *Cluster) syncSecrets() error { err error secret *v1.Secret ) + c.logger.Info("syncing secrets") c.setProcessName("syncing secrets") secrets := c.generateUserSecrets() for secretUsername, secretSpec := range secrets { - if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec); err == nil { + if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil { c.Secrets[secret.UID] = secret c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID) continue } if k8sutil.ResourceAlreadyExists(err) { var userMap map[string]spec.PgUser - if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{}); err != nil { + if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(context.TODO(), secretSpec.Name, metav1.GetOptions{}); err != nil { return fmt.Errorf("could not get current secret: %v", err) } if secretUsername != string(secret.Data["username"]) { - c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername) + c.logger.Errorf("secret %s does not contain the role %q", secretSpec.Name, secretUsername) continue } - c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) + c.Secrets[secret.UID] = secret + c.logger.Debugf("secret %s already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name { secretUsername = constants.SuperuserKeyName userMap = c.systemUsers @@ -412,9 +495,11 @@ func (c *Cluster) syncSecrets() error { } pwdUser := userMap[secretUsername] // if this secret belongs 
to the infrastructure role and the password has changed - replace it in the secret - if pwdUser.Password != string(secret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure { + if pwdUser.Password != string(secret.Data["password"]) && + pwdUser.Origin == spec.RoleOriginInfrastructure { + c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name) - if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil { + if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(context.TODO(), secretSpec, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err) } } else { @@ -456,6 +541,16 @@ func (c *Cluster) syncRoles() (err error) { for _, u := range c.pgUsers { userNames = append(userNames, u.Name) } + + if needMasterConnectionPooler(&c.Spec) || needReplicaConnectionPooler(&c.Spec) { + connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName] + userNames = append(userNames, connectionPoolerUser.Name) + + if _, exists := c.pgUsers[connectionPoolerUser.Name]; !exists { + c.pgUsers[connectionPoolerUser.Name] = connectionPoolerUser + } + } + dbUsers, err = c.readPgUsersFromDatabase(userNames) if err != nil { return fmt.Errorf("error getting users from the database: %v", err) @@ -469,31 +564,12 @@ func (c *Cluster) syncRoles() (err error) { return nil } -// syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset. -func (c *Cluster) syncVolumes() error { - c.setProcessName("syncing volumes") - - act, err := c.volumesNeedResizing(c.Spec.Volume) - if err != nil { - return fmt.Errorf("could not compare size of the volumes: %v", err) - } - if !act { - return nil - } - if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{AWSRegion: c.OpConfig.AWSRegion}}); err != nil { - return fmt.Errorf("could not sync volumes: %v", err) - } - - c.logger.Infof("volumes have been synced successfully") - - return nil -} - func (c *Cluster) syncDatabases() error { c.setProcessName("syncing databases") createDatabases := make(map[string]string) alterOwnerDatabases := make(map[string]string) + preparedDatabases := make([]string, 0) if err := c.initDbConn(); err != nil { return fmt.Errorf("could not init database connection") @@ -509,12 +585,24 @@ func (c *Cluster) syncDatabases() error { return fmt.Errorf("could not get current databases: %v", err) } - for datname, newOwner := range c.Spec.Databases { - currentOwner, exists := currentDatabases[datname] + // if no prepared databases are specified create a database named like the cluster + if c.Spec.PreparedDatabases != nil && len(c.Spec.PreparedDatabases) == 0 { // TODO: add option to disable creating such a default DB + c.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{strings.Replace(c.Name, "-", "_", -1): {}} + } + for preparedDatabaseName := range c.Spec.PreparedDatabases { + _, exists := currentDatabases[preparedDatabaseName] if !exists { - createDatabases[datname] = newOwner + createDatabases[preparedDatabaseName] = preparedDatabaseName + constants.OwnerRoleNameSuffix + preparedDatabases = append(preparedDatabases, preparedDatabaseName) + } + } + + for databaseName, newOwner := range c.Spec.Databases { + currentOwner, exists := currentDatabases[databaseName] + if !exists { + createDatabases[databaseName] = newOwner } else if currentOwner != newOwner { - alterOwnerDatabases[datname] = 
newOwner + alterOwnerDatabases[databaseName] = newOwner } } @@ -522,13 +610,116 @@ func (c *Cluster) syncDatabases() error { return nil } - for datname, owner := range createDatabases { - if err = c.executeCreateDatabase(datname, owner); err != nil { + for databaseName, owner := range createDatabases { + if err = c.executeCreateDatabase(databaseName, owner); err != nil { return err } } - for datname, owner := range alterOwnerDatabases { - if err = c.executeAlterDatabaseOwner(datname, owner); err != nil { + for databaseName, owner := range alterOwnerDatabases { + if err = c.executeAlterDatabaseOwner(databaseName, owner); err != nil { + return err + } + } + + // set default privileges for prepared database + for _, preparedDatabase := range preparedDatabases { + if err = c.execAlterGlobalDefaultPrivileges(preparedDatabase+constants.OwnerRoleNameSuffix, preparedDatabase); err != nil { + return err + } + } + + return nil +} + +func (c *Cluster) syncPreparedDatabases() error { + c.setProcessName("syncing prepared databases") + for preparedDbName, preparedDB := range c.Spec.PreparedDatabases { + if err := c.initDbConnWithName(preparedDbName); err != nil { + return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err) + } + + c.logger.Debugf("syncing prepared database %q", preparedDbName) + // now, prepare defined schemas + preparedSchemas := preparedDB.PreparedSchemas + if len(preparedDB.PreparedSchemas) == 0 { + preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} + } + if err := c.syncPreparedSchemas(preparedDbName, preparedSchemas); err != nil { + return err + } + + // install extensions + if err := c.syncExtensions(preparedDB.Extensions); err != nil { + return err + } + + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } + } + + return nil +} + +func (c *Cluster) syncPreparedSchemas(databaseName string, preparedSchemas map[string]acidv1.PreparedSchema) error { + c.setProcessName("syncing prepared schemas") + + currentSchemas, err := c.getSchemas() + if err != nil { + return fmt.Errorf("could not get current schemas: %v", err) + } + + var schemas []string + + for schema := range preparedSchemas { + schemas = append(schemas, schema) + } + + if createPreparedSchemas, equal := util.SubstractStringSlices(schemas, currentSchemas); !equal { + for _, schemaName := range createPreparedSchemas { + owner := constants.OwnerRoleNameSuffix + dbOwner := databaseName + owner + if preparedSchemas[schemaName].DefaultRoles == nil || *preparedSchemas[schemaName].DefaultRoles { + owner = databaseName + "_" + schemaName + owner + } else { + owner = dbOwner + } + if err = c.executeCreateDatabaseSchema(databaseName, schemaName, dbOwner, owner); err != nil { + return err + } + } + } + + return nil +} + +func (c *Cluster) syncExtensions(extensions map[string]string) error { + c.setProcessName("syncing database extensions") + + createExtensions := make(map[string]string) + alterExtensions := make(map[string]string) + + currentExtensions, err := c.getExtensions() + if err != nil { + return fmt.Errorf("could not get current database extensions: %v", err) + } + + for extName, newSchema := range extensions { + currentSchema, exists := currentExtensions[extName] + if !exists { + createExtensions[extName] = newSchema + } else if currentSchema != newSchema { + alterExtensions[extName] = newSchema + } + } + + for extName, schema := range createExtensions { + if err = c.executeCreateExtension(extName, 
schema); err != nil { + return err + } + } + for extName, schema := range alterExtensions { + if err = c.executeAlterExtension(extName, schema); err != nil { return err } } @@ -547,14 +738,14 @@ func (c *Cluster) syncLogicalBackupJob() error { // sync the job if it exists jobName := c.getLogicalBackupJobName() - if job, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(jobName, metav1.GetOptions{}); err == nil { + if job, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err == nil { desiredJob, err = c.generateLogicalBackupJob() if err != nil { return fmt.Errorf("could not generate the desired logical backup job state: %v", err) } if match, reason := k8sutil.SameLogicalBackupJob(job, desiredJob); !match { - c.logger.Infof("logical job %q is not in the desired state and needs to be updated", + c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.getLogicalBackupJobName(), ) if reason != "" { @@ -575,13 +766,13 @@ func (c *Cluster) syncLogicalBackupJob() error { c.logger.Info("could not find the cluster's logical backup job") if err = c.createLogicalBackupJob(); err == nil { - c.logger.Infof("created missing logical backup job %q", jobName) + c.logger.Infof("created missing logical backup job %s", jobName) } else { if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create missing logical backup job: %v", err) } - c.logger.Infof("logical backup job %q already exists", jobName) - if _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(jobName, metav1.GetOptions{}); err != nil { + c.logger.Infof("logical backup job %s already exists", jobName) + if _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing logical backup job: %v", err) } } diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index 138b7015c..8aa519817 100644 --- a/pkg/cluster/types.go +++ b/pkg/cluster/types.go @@ -69,3 +69,12 @@ type ClusterStatus struct { Spec acidv1.PostgresSpec Error error } + +type TemplateParams map[string]interface{} + +type InstallFunction func(schema string, user string, role PostgresRole) error + +type SyncReason []string + +// no sync happened, empty value +var NoSync SyncReason = []string{} diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 8c02fed2e..393a490bd 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -2,6 +2,7 @@ package cluster import ( "bytes" + "context" "encoding/gob" "encoding/json" "fmt" @@ -17,12 +18,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "github.com/sirupsen/logrus" acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/nicediff" "github.com/zalando/postgres-operator/pkg/util/retryutil" ) @@ -47,7 +50,7 @@ func (g *SecretOauthTokenGetter) getOAuthToken() (string, error) { // Temporary getting postgresql-operator secret from the NamespaceDefault credentialsSecret, err := g.kubeClient. Secrets(g.OAuthTokenSecretName.Namespace). 
- Get(g.OAuthTokenSecretName.Name, metav1.GetOptions{}) + Get(context.TODO(), g.OAuthTokenSecretName.Name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("could not get credentials secret: %v", err) @@ -165,40 +168,59 @@ func (c *Cluster) logPDBChanges(old, new *policybeta1.PodDisruptionBudget, isUpd ) } - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec)) + logNiceDiff(c.logger, old.Spec, new.Spec) +} + +func logNiceDiff(log *logrus.Entry, old, new interface{}) { + o, erro := json.MarshalIndent(old, "", " ") + n, errn := json.MarshalIndent(new, "", " ") + + if erro != nil || errn != nil { + panic("could not marshal API objects, should not happen") + } + + nice := nicediff.Diff(string(o), string(n), true) + for _, s := range strings.Split(nice, "\n") { + // " is not needed in the value to understand + log.Debugf(strings.ReplaceAll(s, "\"", "")) + } } func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate bool, reasons []string) { if isUpdate { - c.logger.Infof("statefulset %q has been changed", util.NameFromMeta(old.ObjectMeta)) + c.logger.Infof("statefulset %s has been changed", util.NameFromMeta(old.ObjectMeta)) } else { - c.logger.Infof("statefulset %q is not in the desired state and needs to be updated", + c.logger.Infof("statefulset %s is not in the desired state and needs to be updated", util.NameFromMeta(old.ObjectMeta), ) } + + logNiceDiff(c.logger, old.Spec, new.Spec) + if !reflect.DeepEqual(old.Annotations, new.Annotations) { - c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations)) + c.logger.Debugf("metadata.annotation are different") + logNiceDiff(c.logger, old.Annotations, new.Annotations) } - c.logger.Debugf("spec diff between old and new statefulsets: \n%s\n", util.PrettyDiff(old.Spec, new.Spec)) if len(reasons) > 0 { for _, reason := range reasons { - c.logger.Infof("reason: %q", reason) + c.logger.Infof("reason: %s", reason) } } } func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) { if isUpdate { - c.logger.Infof("%s service %q has been changed", + c.logger.Infof("%s service %s has been changed", role, util.NameFromMeta(old.ObjectMeta), ) } else { - c.logger.Infof("%s service %q is not in the desired state and needs to be updated", + c.logger.Infof("%s service %s is not in the desired state and needs to be updated", role, util.NameFromMeta(old.ObjectMeta), ) } - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec)) + + logNiceDiff(c.logger, old.Spec, new.Spec) if reason != "" { c.logger.Infof("reason: %s", reason) @@ -207,7 +229,7 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) { c.logger.Infof("volume specification has been changed") - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new)) + logNiceDiff(c.logger, old, new) } func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { @@ -216,24 +238,64 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { return nil, fmt.Errorf("no teamId specified") } + c.logger.Debugf("fetching possible additional team members for team %q", teamID) + members := []string{} + additionalMembers := c.PgTeamMap[teamID].AdditionalMembers + for _, member := range additionalMembers { + members = append(members, member) + } + if !c.OpConfig.EnableTeamsAPI { - c.logger.Debugf("team API is disabled, returning empty list of members for team %q", teamID) - return 
[]string{}, nil + c.logger.Debugf("team API is disabled, only returning %d members for team %q", len(members), teamID) + return members, nil } token, err := c.oauthTokenGetter.getOAuthToken() if err != nil { - c.logger.Warnf("could not get oauth token to authenticate to team service API, returning empty list of team members: %v", err) - return []string{}, nil + c.logger.Warnf("could not get oauth token to authenticate to team service API, only returning %d members for team %q: %v", len(members), teamID, err) + return members, nil } teamInfo, err := c.teamsAPIClient.TeamInfo(teamID, token) if err != nil { - c.logger.Warnf("could not get team info for team %q, returning empty list of team members: %v", teamID, err) - return []string{}, nil + c.logger.Warnf("could not get team info for team %q, only returning %d members: %v", teamID, len(members), err) + return members, nil } - return teamInfo.Members, nil + for _, member := range teamInfo.Members { + if !(util.SliceContains(members, member)) { + members = append(members, member) + } + } + + return members, nil +} + +// Returns annotations to be passed to child objects +func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string { + + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + // allow to inherit certain labels from the 'postgres' object + if pgCRDAnnotations != nil { + for k, v := range pgCRDAnnotations { + for _, match := range c.OpConfig.InheritedAnnotations { + if k == match { + annotations[k] = v + } + } + } + } + + if len(annotations) > 0 { + return annotations + } + + return nil } func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { @@ -278,7 +340,7 @@ func (c *Cluster) waitStatefulsetReady() error { listOptions := metav1.ListOptions{ LabelSelector: c.labelsSet(false).String(), } - ss, err := c.KubeClient.StatefulSets(c.Namespace).List(listOptions) + ss, err := c.KubeClient.StatefulSets(c.Namespace).List(context.TODO(), listOptions) if err != nil { return false, err } @@ -313,7 +375,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { } podsNumber = 1 if !anyReplica { - pods, err := c.KubeClient.Pods(namespace).List(listOptions) + pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions) if err != nil { return err } @@ -327,7 +389,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { func() (bool, error) { masterCount := 0 if !anyReplica { - masterPods, err2 := c.KubeClient.Pods(namespace).List(masterListOption) + masterPods, err2 := c.KubeClient.Pods(namespace).List(context.TODO(), masterListOption) if err2 != nil { return false, err2 } @@ -337,7 +399,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { } masterCount = len(masterPods.Items) } - replicaPods, err2 := c.KubeClient.Pods(namespace).List(replicaListOption) + replicaPods, err2 := c.KubeClient.Pods(namespace).List(context.TODO(), replicaListOption) if err2 != nil { return false, err2 } @@ -408,7 +470,10 @@ func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set { } func (c *Cluster) labelsSelector() *metav1.LabelSelector { - return &metav1.LabelSelector{MatchLabels: c.labelsSet(false), MatchExpressions: nil} + return &metav1.LabelSelector{ + MatchLabels: c.labelsSet(false), + MatchExpressions: nil, + } } func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) labels.Set { @@ -483,3 +548,31 @@ func (c *Cluster) 
GetSpec() (*acidv1.Postgresql, error) { func (c *Cluster) patroniUsesKubernetes() bool { return c.OpConfig.EtcdHost == "" } + +func (c *Cluster) patroniKubernetesUseConfigMaps() bool { + if !c.patroniUsesKubernetes() { + return false + } + + // otherwise, follow the operator configuration + return c.OpConfig.KubernetesUseConfigMaps +} + +// Earlier arguments take priority +func mergeContainers(containers ...[]v1.Container) ([]v1.Container, []string) { + containerNameTaken := map[string]bool{} + result := make([]v1.Container, 0) + conflicts := make([]string, 0) + + for _, containerArray := range containers { + for _, container := range containerArray { + if _, taken := containerNameTaken[container.Name]; taken { + conflicts = append(conflicts, container.Name) + } else { + containerNameTaken[container.Name] = true + result = append(result, container) + } + } + } + return result, conflicts +} diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go new file mode 100644 index 000000000..7afc59f28 --- /dev/null +++ b/pkg/cluster/util_test.go @@ -0,0 +1,141 @@ +package cluster + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sFake "k8s.io/client-go/kubernetes/fake" +) + +func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { + clientSet := k8sFake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PodDisruptionBudgetsGetter: clientSet.PolicyV1beta1(), + ServicesGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + }, clientSet +} + +func TestInheritedAnnotations(t *testing.T) { + testName := "test inheriting annotations from manifest" + client, _ := newFakeK8sAnnotationsClient() + clusterName := "acid-test-cluster" + namespace := "default" + annotationValue := "acid" + role := Master + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "owned-by": annotationValue, + }, + }, + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + InheritedAnnotations: []string{"owned-by"}, + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = clusterName + cluster.Namespace = namespace + + // test annotationsSet function + inheritedAnnotations := cluster.annotationsSet(nil) + + listOptions := metav1.ListOptions{ + LabelSelector: 
cluster.labelsSet(false).String(), + } + + // check statefulset annotations + _, err := cluster.createStatefulSet() + assert.NoError(t, err) + + stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, sts := range stsList.Items { + if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: StatefulSet %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + // pod template + if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: pod template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + // pvc template + if util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations) { + t.Errorf("%s: PVC template %v not expected to have inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + } + + // check service annotations + cluster.createService(Master) + svcList, err := client.Services(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, svc := range svcList.Items { + if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Service %v not inherited annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations) + } + } + + // check pod disruption budget annotations + cluster.createPodDisruptionBudget() + pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, pdb := range pdbList.Items { + if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Pod Disruption Budget %v not inherited annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations) + } + } + + // check pooler deployment annotations + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} + cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{ + Name: cluster.connectionPoolerName(role), + ClusterName: cluster.ClusterName, + Namespace: cluster.Namespace, + Role: role, + } + deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role]) + assert.NoError(t, err) + + if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Deployment %v not inherited annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations) + } + +} diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index d92ae6258..e07d453ec 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -1,14 +1,16 @@ package cluster import ( + "context" "fmt" "strconv" "strings" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/aws/aws-sdk-go/aws" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -17,13 +19,213 @@ import ( "github.com/zalando/postgres-operator/pkg/util/volumes" ) +func (c *Cluster) syncVolumes() error { + c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode) + var err error + + // check quantity string once, and do not bother with it 
anymore anywhere else + _, err = resource.ParseQuantity(c.Spec.Volume.Size) + if err != nil { + return fmt.Errorf("could not parse volume size from the manifest: %v", err) + } + + if c.OpConfig.StorageResizeMode == "mixed" { + // mixed op uses AWS API to adjust size, throughput, iops, and calls pvc change for file system resize + // in case of errors we proceed to let K8s do its work, favoring disk space increase of other adjustments + + err = c.populateVolumeMetaData() + if err != nil { + c.logger.Errorf("populating EBS meta data failed, skipping potential adjustements: %v", err) + } else { + err = c.syncUnderlyingEBSVolume() + if err != nil { + c.logger.Errorf("errors occured during EBS volume adjustments: %v", err) + } + } + + // resize pvc to adjust filesystem size until better K8s support + if err = c.syncVolumeClaims(); err != nil { + err = fmt.Errorf("could not sync persistent volume claims: %v", err) + return err + } + } else if c.OpConfig.StorageResizeMode == "pvc" { + if err = c.syncVolumeClaims(); err != nil { + err = fmt.Errorf("could not sync persistent volume claims: %v", err) + return err + } + } else if c.OpConfig.StorageResizeMode == "ebs" { + // potentially enlarge volumes before changing the statefulset. By doing that + // in this order we make sure the operator is not stuck waiting for a pod that + // cannot start because it ran out of disk space. + // TODO: handle the case of the cluster that is downsized and enlarged again + // (there will be a volume from the old pod for which we can't act before the + // the statefulset modification is concluded) + if err = c.syncEbsVolumes(); err != nil { + err = fmt.Errorf("could not sync persistent volumes: %v", err) + return err + } + } else { + c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). 
Skipping volume sync.") + } + + return nil +} + +func (c *Cluster) syncUnderlyingEBSVolume() error { + c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size") + + var err error + + targetValue := c.Spec.Volume + newSize, err := resource.ParseQuantity(targetValue.Size) + targetSize := quantityToGigabyte(newSize) + + awsGp3 := aws.String("gp3") + awsIo2 := aws.String("io2") + + errors := []string{} + + for _, volume := range c.EBSVolumes { + var modifyIops *int64 + var modifyThroughput *int64 + var modifySize *int64 + var modifyType *string + + if targetValue.Iops != nil { + if volume.Iops != *targetValue.Iops { + modifyIops = targetValue.Iops + } + } + + if targetValue.Throughput != nil { + if volume.Throughput != *targetValue.Throughput { + modifyThroughput = targetValue.Throughput + } + } + + if targetSize > volume.Size { + modifySize = &targetSize + } + + if modifyIops != nil || modifyThroughput != nil || modifySize != nil { + if modifyIops != nil || modifyThroughput != nil { + // we default to gp3 if iops and throughput are configured + modifyType = awsGp3 + if targetValue.VolumeType == "io2" { + modifyType = awsIo2 + } + } else if targetValue.VolumeType == "gp3" && volume.VolumeType != "gp3" { + modifyType = awsGp3 + } else { + // do not touch type + modifyType = nil + } + + err = c.VolumeResizer.ModifyVolume(volume.VolumeID, modifyType, modifySize, modifyIops, modifyThroughput) + if err != nil { + errors = append(errors, fmt.Sprintf("modify volume failed: volume=%s size=%d iops=%d throughput=%d", volume.VolumeID, volume.Size, volume.Iops, volume.Throughput)) + } + } + } + + if len(errors) > 0 { + for _, s := range errors { + c.logger.Warningf(s) + } + // c.logger.Errorf("failed to modify %d of %d volumes", len(c.EBSVolumes), len(errors)) + } + return nil +} + +func (c *Cluster) populateVolumeMetaData() error { + c.logger.Infof("starting reading ebs meta data") + + pvs, err := c.listPersistentVolumes() + if err != nil { + return fmt.Errorf("could not list persistent volumes: %v", err) + } + c.logger.Debugf("found %d volumes, size of known volumes %d", len(pvs), len(c.EBSVolumes)) + + volumeIds := []string{} + var volumeID string + for _, pv := range pvs { + volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + if err != nil { + continue + } + + volumeIds = append(volumeIds, volumeID) + } + + currentVolumes, err := c.VolumeResizer.DescribeVolumes(volumeIds) + if nil != err { + return err + } + + if len(currentVolumes) != len(c.EBSVolumes) { + c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes)) + } + + // reset map, operator is not responsible for dangling ebs volumes + c.EBSVolumes = make(map[string]volumes.VolumeProperties) + for _, volume := range currentVolumes { + c.EBSVolumes[volume.VolumeID] = volume + } + + return nil +} + +// syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset. 
+func (c *Cluster) syncVolumeClaims() error { + c.setProcessName("syncing volume claims") + + needsResizing, err := c.volumeClaimsNeedResizing(c.Spec.Volume) + if err != nil { + return fmt.Errorf("could not compare size of the volume claims: %v", err) + } + + if !needsResizing { + c.logger.Infof("volume claims do not require changes") + return nil + } + + if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil { + return fmt.Errorf("could not sync volume claims: %v", err) + } + + c.logger.Infof("volume claims have been synced successfully") + + return nil +} + +// syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset. +func (c *Cluster) syncEbsVolumes() error { + c.setProcessName("syncing EBS and Claims volumes") + + act, err := c.volumesNeedResizing() + if err != nil { + return fmt.Errorf("could not compare size of the volumes: %v", err) + } + if !act { + return nil + } + + if err := c.resizeVolumes(); err != nil { + return fmt.Errorf("could not sync volumes: %v", err) + } + + c.logger.Infof("volumes have been synced successfully") + + return nil +} + func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, error) { ns := c.Namespace listOptions := metav1.ListOptions{ LabelSelector: c.labelsSet(false).String(), } - pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(listOptions) + pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(context.TODO(), listOptions) if err != nil { return nil, fmt.Errorf("could not list of PersistentVolumeClaims: %v", err) } @@ -38,7 +240,7 @@ func (c *Cluster) deletePersistentVolumeClaims() error { } for _, pvc := range pvcs { c.logger.Debugf("deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta)) - if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, c.deleteOptions); err != nil { + if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions); err != nil { c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err) } } @@ -51,6 +253,35 @@ func (c *Cluster) deletePersistentVolumeClaims() error { return nil } +func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error { + c.logger.Debugln("resizing PVCs") + pvcs, err := c.listPersistentVolumeClaims() + if err != nil { + return err + } + newQuantity, err := resource.ParseQuantity(newVolume.Size) + if err != nil { + return fmt.Errorf("could not parse volume size: %v", err) + } + newSize := quantityToGigabyte(newQuantity) + for _, pvc := range pvcs { + volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) + if volumeSize >= newSize { + if volumeSize > newSize { + c.logger.Warningf("cannot shrink persistent volume") + } + continue + } + pvc.Spec.Resources.Requests[v1.ResourceStorage] = newQuantity + c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name) + if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("could not update persistent volume claim: %q", err) + } + c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name) + } + return nil +} + func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { result := make([]*v1.PersistentVolume, 0) @@ -78,7 +309,7 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { continue } } - pv, err := c.KubeClient.PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + pv, 
err := c.KubeClient.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not get PersistentVolume: %v", err) } @@ -89,19 +320,27 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { } // resizeVolumes resize persistent volumes compatible with the given resizer interface -func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.VolumeResizer) error { - c.setProcessName("resizing volumes") +func (c *Cluster) resizeVolumes() error { + if c.VolumeResizer == nil { + return fmt.Errorf("no volume resizer set for EBS volume handling") + } - var totalIncompatible int + c.setProcessName("resizing EBS volumes") - newQuantity, err := resource.ParseQuantity(newVolume.Size) + newQuantity, err := resource.ParseQuantity(c.Spec.Volume.Size) if err != nil { return fmt.Errorf("could not parse volume size: %v", err) } - pvs, newSize, err := c.listVolumesWithManifestSize(newVolume) + + newSize := quantityToGigabyte(newQuantity) + resizer := c.VolumeResizer + var totalIncompatible int + + pvs, err := c.listPersistentVolumes() if err != nil { return fmt.Errorf("could not list persistent volumes: %v", err) } + for _, pv := range pvs { volumeSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) if volumeSize >= newSize { @@ -111,45 +350,45 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu continue } compatible := false - for _, resizer := range resizers { - if !resizer.VolumeBelongsToProvider(pv) { - continue - } - compatible = true - if !resizer.IsConnectedToProvider() { - err := resizer.ConnectToProvider() - if err != nil { - return fmt.Errorf("could not connect to the volume provider: %v", err) - } - defer func() { - if err := resizer.DisconnectFromProvider(); err != nil { - c.logger.Errorf("%v", err) - } - }() - } - awsVolumeID, err := resizer.GetProviderVolumeID(pv) - if err != nil { - return err - } - c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize) - if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil { - return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err) - } - c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name) - podName := getPodNameFromPersistentVolume(pv) - if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil { - return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err) - } - c.logger.Debugf("filesystem resize successful on volume %q", pv.Name) - pv.Spec.Capacity[v1.ResourceStorage] = newQuantity - c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name) - if _, err := c.KubeClient.PersistentVolumes().Update(pv); err != nil { - return fmt.Errorf("could not update persistent volume: %q", err) - } - c.logger.Debugf("successfully updated persistent volume %q", pv.Name) + + if !resizer.VolumeBelongsToProvider(pv) { + continue } + compatible = true + if !resizer.IsConnectedToProvider() { + err := resizer.ConnectToProvider() + if err != nil { + return fmt.Errorf("could not connect to the volume provider: %v", err) + } + defer func() { + if err := resizer.DisconnectFromProvider(); err != nil { + c.logger.Errorf("%v", err) + } + }() + } + awsVolumeID, err := resizer.GetProviderVolumeID(pv) + if err != nil { + return err + } + c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize) + if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil 
{ + return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err) + } + c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name) + podName := getPodNameFromPersistentVolume(pv) + if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil { + return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err) + } + c.logger.Debugf("filesystem resize successful on volume %q", pv.Name) + pv.Spec.Capacity[v1.ResourceStorage] = newQuantity + c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name) + if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("could not update persistent volume: %q", err) + } + c.logger.Debugf("successfully updated persistent volume %q", pv.Name) + if !compatible { - c.logger.Warningf("volume %q is incompatible with all available resizing providers", pv.Name) + c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name) totalIncompatible++ } } @@ -159,13 +398,18 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu return nil } -func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) { - vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume) +func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error) { + newSize, err := resource.ParseQuantity(newVolume.Size) + manifestSize := quantityToGigabyte(newSize) if err != nil { - return false, err + return false, fmt.Errorf("could not parse volume size from the manifest: %v", err) } - for _, pv := range vols { - currentSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) + pvcs, err := c.listPersistentVolumeClaims() + if err != nil { + return false, fmt.Errorf("could not receive persistent volume claims: %v", err) + } + for _, pvc := range pvcs { + currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) if currentSize != manifestSize { return true, nil } @@ -173,17 +417,21 @@ func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) { return false, nil } -func (c *Cluster) listVolumesWithManifestSize(newVolume acidv1.Volume) ([]*v1.PersistentVolume, int64, error) { - newSize, err := resource.ParseQuantity(newVolume.Size) - if err != nil { - return nil, 0, fmt.Errorf("could not parse volume size from the manifest: %v", err) - } - manifestSize := quantityToGigabyte(newSize) +func (c *Cluster) volumesNeedResizing() (bool, error) { + newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size) + newSize := quantityToGigabyte(newQuantity) + vols, err := c.listPersistentVolumes() if err != nil { - return nil, 0, fmt.Errorf("could not list persistent volumes: %v", err) + return false, err } - return vols, manifestSize, nil + for _, pv := range vols { + currentSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) + if currentSize != newSize { + return true, nil + } + } + return false, nil } // getPodNameFromPersistentVolume returns a pod name that it extracts from the volume claim ref. 
@@ -196,3 +444,64 @@ func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedNam func quantityToGigabyte(q resource.Quantity) int64 { return q.ScaledValue(0) / (1 * constants.Gigabyte) } + +func (c *Cluster) executeEBSMigration() error { + if !c.OpConfig.EnableEBSGp3Migration { + return nil + } + c.logger.Infof("starting EBS gp2 to gp3 migration") + + pvs, err := c.listPersistentVolumes() + if err != nil { + return fmt.Errorf("could not list persistent volumes: %v", err) + } + c.logger.Debugf("found %d volumes, size of known volumes %d", len(pvs), len(c.EBSVolumes)) + + volumeIds := []string{} + var volumeID string + for _, pv := range pvs { + volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + if err != nil { + continue + } + + volumeIds = append(volumeIds, volumeID) + } + + if len(volumeIds) == len(c.EBSVolumes) { + hasGp2 := false + for _, v := range c.EBSVolumes { + if v.VolumeType == "gp2" { + hasGp2 = true + } + } + + if !hasGp2 { + c.logger.Infof("no EBS gp2 volumes left to migrate") + return nil + } + } + + awsVolumes, err := c.VolumeResizer.DescribeVolumes(volumeIds) + if nil != err { + return err + } + + var i3000 int64 = 3000 + var i125 int64 = 125 + + for _, volume := range awsVolumes { + if volume.VolumeType == "gp2" && volume.Size < c.OpConfig.EnableEBSGp3MigrationMaxSize { + c.logger.Infof("modifying EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) + err = c.VolumeResizer.ModifyVolume(volume.VolumeID, aws.String("gp3"), &volume.Size, &i3000, &i125) + if nil != err { + c.logger.Warningf("modifying volume %s failed: %v", volume.VolumeID, err) + } + } else { + c.logger.Debugf("skipping EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) + } + c.EBSVolumes[volume.VolumeID] = volume + } + + return nil +} diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go new file mode 100644 index 000000000..aea7711af --- /dev/null +++ b/pkg/cluster/volumes_test.go @@ -0,0 +1,464 @@ +package cluster + +import ( + "fmt" + "testing" + + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/aws/aws-sdk-go/aws" + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/assert" + "github.com/zalando/postgres-operator/mocks" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/volumes" + "k8s.io/client-go/kubernetes/fake" +) + +func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) { + clientSet := fake.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PersistentVolumeClaimsGetter: clientSet.CoreV1(), + PersistentVolumesGetter: clientSet.CoreV1(), + PodsGetter: clientSet.CoreV1(), + }, clientSet +} + +func TestResizeVolumeClaim(t *testing.T) { + testName := "test resizing of persistent volume claims" + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + newVolumeSize := "2Gi" + + storage1Gi, err := resource.ParseQuantity("1Gi") + assert.NoError(t, err) + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: 
map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "pvc", + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + // define and create PVCs for 1Gi volumes + pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") + // add another PVC with different cluster name + pvcList.Items = append(pvcList.Items, CreatePVCs(namespace, clusterName+"-2", labels.Set{}, 1, "1Gi").Items[0]) + + for _, pvc := range pvcList.Items { + cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) + } + + // test resizing + cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize}) + + pvcs, err := cluster.listPersistentVolumeClaims() + assert.NoError(t, err) + + // check if listPersistentVolumeClaims returns only the PVCs matching the filter + if len(pvcs) != len(pvcList.Items)-1 { + t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1) + } + + // check if PVCs were correctly resized + for _, pvc := range pvcs { + newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) + expectedQuantity, err := resource.ParseQuantity(newVolumeSize) + assert.NoError(t, err) + expectedSize := quantityToGigabyte(expectedQuantity) + if newStorageSize != expectedSize { + t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize) + } + } + + // check if other PVC was not resized + pvc2, err := cluster.KubeClient.PersistentVolumeClaims(namespace).Get(context.TODO(), constants.DataVolumeName+"-"+clusterName+"-2-0", metav1.GetOptions{}) + assert.NoError(t, err) + unchangedSize := quantityToGigabyte(pvc2.Spec.Resources.Requests[v1.ResourceStorage]) + expectedSize := quantityToGigabyte(storage1Gi) + if unchangedSize != expectedSize { + t.Errorf("%s: volume size changed, got %v, expected %v", testName, unchangedSize, expectedSize) + } +} + +func TestQuantityToGigabyte(t *testing.T) { + tests := []struct { + name string + quantityStr string + expected int64 + }{ + { + "test with 1Gi", + "1Gi", + 1, + }, + { + "test with float", + "1.5Gi", + int64(1), + }, + { + "test with 1000Mi", + "1000Mi", + int64(0), + }, + } + + for _, tt := range tests { + quantity, err := resource.ParseQuantity(tt.quantityStr) + assert.NoError(t, err) + gigabyte := quantityToGigabyte(quantity) + if gigabyte != tt.expected { + t.Errorf("%s: got %v, expected %v", tt.name, gigabyte, tt.expected) + } + } +} + +func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int, size string) v1.PersistentVolumeClaimList { + // define and create PVCs for 1Gi volumes + storage1Gi, _ := resource.ParseQuantity(size) + pvcList := v1.PersistentVolumeClaimList{ + Items: []v1.PersistentVolumeClaim{}, + } + + for i := 0; i < n; i++ { + pvc := v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clusterName, i), + Namespace: namespace, + Labels: labels, + }, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: storage1Gi, + }, + }, + VolumeName: fmt.Sprintf("persistent-volume-%d", i), + }, + } + pvcList.Items = append(pvcList.Items, pvc) + } + + return pvcList +} + +func TestMigrateEBS(t *testing.T) { + client, _ := newFakeK8sPVCclient() 
+ clusterName := "acid-test-cluster" + namespace := "default" + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "pvc", + EnableEBSGp3Migration: true, + EnableEBSGp3MigrationMaxSize: 1000, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + cluster.Spec.Volume.Size = "1Gi" + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + testVolumes := []testVolume{ + { + size: 100, + }, + { + size: 100, + }, + } + + initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resizer := mocks.NewMockVolumeResizer(ctrl) + + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( + []volumes.VolumeProperties{ + {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100}, + {VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 100}}, nil) + + // expect only gp2 volume to be modified + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + cluster.VolumeResizer = resizer + cluster.executeEBSMigration() +} + +type testVolume struct { + iops int64 + throughtput int64 + size int64 + volType string +} + +func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) { + i := 0 + for _, v := range volumes { + storage1Gi, _ := resource.ParseQuantity(fmt.Sprintf("%d", v.size)) + + ps := v1.PersistentVolumeSpec{} + ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} + ps.AWSElasticBlockStore.VolumeID = fmt.Sprintf("aws://eu-central-1b/ebs-volume-%d", i+1) + + pv := v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("persistent-volume-%d", i), + }, + Spec: ps, + } + + client.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{}) + + pvc := v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clustername, i), + Namespace: namespace, + Labels: labels, + }, + Spec: v1.PersistentVolumeClaimSpec{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: storage1Gi, + }, + }, + VolumeName: fmt.Sprintf("persistent-volume-%d", i), + }, + } + + client.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) + + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", clustername, i), + Labels: labels, + }, + Spec: v1.PodSpec{}, + } + + client.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + + i = i + 1 + } +} + +func TestMigrateGp3Support(t *testing.T) { + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": 
"spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "mixed", + EnableEBSGp3Migration: false, + EnableEBSGp3MigrationMaxSize: 1000, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec.Volume.Size = "150Gi" + cluster.Spec.Volume.Iops = aws.Int64(6000) + cluster.Spec.Volume.Throughput = aws.Int64(275) + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + testVolumes := []testVolume{ + { + size: 100, + }, + { + size: 100, + }, + { + size: 100, + }, + } + + initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resizer := mocks.NewMockVolumeResizer(ctrl) + + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil) + + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return( + []volumes.VolumeProperties{ + {VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000}, + {VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 105, Iops: 4000}, + {VolumeID: "ebs-volume-3", VolumeType: "gp3", Size: 151, Iops: 6000, Throughput: 275}}, nil) + + // expect only gp2 volume to be modified + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) + // resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-3"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + cluster.VolumeResizer = resizer + cluster.syncVolumes() +} + +func TestManualGp2Gp3Support(t *testing.T) { + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "mixed", + EnableEBSGp3Migration: false, + EnableEBSGp3MigrationMaxSize: 1000, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec.Volume.Size = "150Gi" + cluster.Spec.Volume.Iops = aws.Int64(6000) + cluster.Spec.Volume.Throughput = aws.Int64(275) + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + testVolumes := []testVolume{ + { + size: 100, + }, + { + size: 100, + }, + } + + initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resizer := mocks.NewMockVolumeResizer(ctrl) + + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + + 
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( + []volumes.VolumeProperties{ + {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, + {VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000}, + }, nil) + + // expect only gp2 volume to be modified + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) + + cluster.VolumeResizer = resizer + cluster.syncVolumes() +} + +func TestDontTouchType(t *testing.T) { + client, _ := newFakeK8sPVCclient() + clusterName := "acid-test-cluster" + namespace := "default" + + // new cluster with pvc storage resize mode and configured labels + var cluster = New( + Config{ + OpConfig: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + }, + StorageResizeMode: "mixed", + EnableEBSGp3Migration: false, + EnableEBSGp3MigrationMaxSize: 1000, + }, + }, client, acidv1.Postgresql{}, logger, eventRecorder) + + cluster.Spec.Volume.Size = "177Gi" + + // set metadata, so that labels will get correct values + cluster.Name = clusterName + cluster.Namespace = namespace + filterLabels := cluster.labelsSet(false) + + testVolumes := []testVolume{ + { + size: 150, + }, + { + size: 150, + }, + } + + initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resizer := mocks.NewMockVolumeResizer(ctrl) + + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) + resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( + []volumes.VolumeProperties{ + {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, + {VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000}, + }, nil) + + // expect only gp2 volume to be modified + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil) + resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil) + + cluster.VolumeResizer = resizer + cluster.syncVolumes() +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 989a66f96..63d72ce55 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -1,41 +1,54 @@ package controller import ( + "bytes" + "context" + "encoding/json" "fmt" "os" + "strings" "sync" + "time" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/apiserver" "github.com/zalando/postgres-operator/pkg/cluster" + acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" + 
"github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/ringlog" - - acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" ) // Controller represents operator controller type Controller struct { - config spec.ControllerConfig - opConfig *config.Config + config spec.ControllerConfig + opConfig *config.Config + pgTeamMap teams.PostgresTeamMap logger *logrus.Entry KubeClient k8sutil.KubernetesClient apiserver *apiserver.Server + eventRecorder record.EventRecorder + eventBroadcaster record.EventBroadcaster + stopCh chan struct{} + controllerID string curWorkerID uint32 //initialized with 0 curWorkerCluster sync.Map clusterWorkers map[spec.NamespacedName]uint32 @@ -45,10 +58,11 @@ type Controller struct { clusterHistory map[spec.NamespacedName]ringlog.RingLogger // history of the cluster changes teamClusters map[string][]spec.NamespacedName - postgresqlInformer cache.SharedIndexInformer - podInformer cache.SharedIndexInformer - nodesInformer cache.SharedIndexInformer - podCh chan cluster.PodEvent + postgresqlInformer cache.SharedIndexInformer + postgresTeamInformer cache.SharedIndexInformer + podInformer cache.SharedIndexInformer + nodesInformer cache.SharedIndexInformer + podCh chan cluster.PodEvent clusterEventQueues []*cache.FIFO // [workerID]Queue lastClusterSyncTime int64 @@ -62,13 +76,35 @@ type Controller struct { } // NewController creates a new controller -func NewController(controllerConfig *spec.ControllerConfig) *Controller { +func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller { logger := logrus.New() + if controllerConfig.EnableJsonLogging { + logger.SetFormatter(&logrus.JSONFormatter{}) + } else { + if os.Getenv("LOG_NOQUOTE") != "" { + logger.SetFormatter(&logrus.TextFormatter{PadLevelText: true, DisableQuote: true}) + } + } + + var myComponentName = "postgres-operator" + if controllerId != "" { + myComponentName += "/" + controllerId + } + + eventBroadcaster := record.NewBroadcaster() + + // disabling the sending of events also to the logoutput + // the operator currently duplicates a lot of log entries with this setup + // eventBroadcaster.StartLogging(logger.Infof) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: myComponentName}) c := &Controller{ config: *controllerConfig, opConfig: &config.Config{}, logger: logger.WithField("pkg", "controller"), + eventRecorder: recorder, + eventBroadcaster: eventBroadcaster, + controllerID: controllerId, curWorkerCluster: sync.Map{}, clusterWorkers: make(map[spec.NamespacedName]uint32), clusters: make(map[spec.NamespacedName]*cluster.Cluster), @@ -90,6 +126,11 @@ func (c *Controller) initClients() { if err != nil { c.logger.Fatalf("could not create kubernetes clients: %v", err) } + c.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: c.KubeClient.EventsGetter.Events("")}) + if err != nil { + 
c.logger.Fatalf("could not setup kubernetes event sink: %v", err) + } + } func (c *Controller) initOperatorConfig() { @@ -97,7 +138,7 @@ func (c *Controller) initOperatorConfig() { if c.config.ConfigMapName != (spec.NamespacedName{}) { configMap, err := c.KubeClient.ConfigMaps(c.config.ConfigMapName.Namespace). - Get(c.config.ConfigMapName.Name, metav1.GetOptions{}) + Get(context.TODO(), c.config.ConfigMapName.Name, metav1.GetOptions{}) if err != nil { panic(err) } @@ -156,12 +197,25 @@ func (c *Controller) warnOnDeprecatedOperatorParameters() { c.logger.Warningf("Operator configuration parameter 'enable_load_balancer' is deprecated and takes no effect. " + "Consider using the 'enable_master_load_balancer' or 'enable_replica_load_balancer' instead.") } + + if len(c.opConfig.SidecarImages) > 0 { + c.logger.Warningf("Operator configuration parameter 'sidecar_docker_images' is deprecated. " + + "Consider using 'sidecars' instead.") + } +} + +func compactValue(v string) string { + var compact bytes.Buffer + if err := json.Compact(&compact, []byte(v)); err != nil { + panic("Hard coded json strings broken!") + } + return compact.String() } func (c *Controller) initPodServiceAccount() { if c.opConfig.PodServiceAccountDefinition == "" { - c.opConfig.PodServiceAccountDefinition = ` + stringValue := ` { "apiVersion": "v1", "kind": "ServiceAccount", @@ -169,6 +223,9 @@ func (c *Controller) initPodServiceAccount() { "name": "postgres-pod" } }` + + c.opConfig.PodServiceAccountDefinition = compactValue(stringValue) + } // re-uses k8s internal parsing. See k8s client-go issue #193 for explanation @@ -292,7 +349,7 @@ func (c *Controller) initRoleBinding() { // operator binds it to the cluster role with sufficient privileges // we assume the role is created by the k8s administrator if c.opConfig.PodServiceAccountRoleBindingDefinition == "" { - c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(` + stringValue := fmt.Sprintf(` { "apiVersion": "rbac.authorization.k8s.io/v1", "kind": "RoleBinding", @@ -311,6 +368,7 @@ func (c *Controller) initRoleBinding() { } ] }`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name) + c.opConfig.PodServiceAccountRoleBindingDefinition = compactValue(stringValue) } c.logger.Info("Parse role bindings") // re-uses k8s internal parsing. 
See k8s client-go issue #193 for explanation @@ -329,11 +387,19 @@ func (c *Controller) initRoleBinding() { } - // actual roles bindings are deployed at the time of Postgres/Spilo cluster creation + // actual roles bindings ar*logrus.Entrye deployed at the time of Postgres/Spilo cluster creation +} + +func logMultiLineConfig(log *logrus.Entry, config string) { + lines := strings.Split(config, "\n") + for _, l := range lines { + log.Infof("%s", l) + } } func (c *Controller) initController() { c.initClients() + c.controllerID = os.Getenv("CONTROLLER_ID") if configObjectName := os.Getenv("POSTGRES_OPERATOR_CONFIGURATION_OBJECT"); configObjectName != "" { if err := c.createConfigurationCRD(c.opConfig.EnableCRDValidation); err != nil { @@ -360,16 +426,22 @@ func (c *Controller) initController() { c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err) } - c.initPodServiceAccount() c.initSharedInformers() + if c.opConfig.EnablePostgresTeamCRD { + c.loadPostgresTeams() + } else { + c.pgTeamMap = teams.PostgresTeamMap{} + } + if c.opConfig.DebugLogging { c.logger.Logger.Level = logrus.DebugLevel } - c.logger.Infof("config: %s", c.opConfig.MustMarshal()) + logMultiLineConfig(c.logger, c.opConfig.MustMarshal()) - if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil { + roleDefs := c.getInfrastructureRoleDefinitions() + if infraRoles, err := c.getInfrastructureRoles(roleDefs); err != nil { c.logger.Warningf("could not get infrastructure roles: %v", err) } else { c.config.InfrastructureRoles = infraRoles @@ -393,6 +465,7 @@ func (c *Controller) initController() { func (c *Controller) initSharedInformers() { + // Postgresqls c.postgresqlInformer = acidv1informer.NewPostgresqlInformer( c.KubeClient.AcidV1ClientSet, c.opConfig.WatchedNamespace, @@ -405,6 +478,20 @@ func (c *Controller) initSharedInformers() { DeleteFunc: c.postgresqlDelete, }) + // PostgresTeams + if c.opConfig.EnablePostgresTeamCRD { + c.postgresTeamInformer = acidv1informer.NewPostgresTeamInformer( + c.KubeClient.AcidV1ClientSet, + c.opConfig.WatchedNamespace, + constants.QueueResyncPeriodTPR*6, // 30 min + cache.Indexers{}) + + c.postgresTeamInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.postgresTeamAdd, + UpdateFunc: c.postgresTeamUpdate, + }) + } + // Pods podLw := &cache.ListWatch{ ListFunc: c.podListFunc, @@ -465,6 +552,10 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) + if c.opConfig.EnablePostgresTeamCRD { + go c.runPostgresTeamInformer(stopCh, wg) + } + c.logger.Info("started working in background") } @@ -480,6 +571,12 @@ func (c *Controller) runPostgresqlInformer(stopCh <-chan struct{}, wg *sync.Wait c.postgresqlInformer.Run(stopCh) } +func (c *Controller) runPostgresTeamInformer(stopCh <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + c.postgresTeamInformer.Run(stopCh) +} + func queueClusterKey(eventType EventType, uid types.UID) string { return fmt.Sprintf("%s-%s", eventType, uid) } @@ -501,7 +598,7 @@ func (c *Controller) getEffectiveNamespace(namespaceFromEnvironment, namespaceFr } else { - if _, err := c.KubeClient.Namespaces().Get(namespace, metav1.GetOptions{}); err != nil { + if _, err := c.KubeClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil { c.logger.Fatalf("Could not find the watched namespace %q", namespace) } else { c.logger.Infof("Listenting to the specific namespace %q", 
namespace) @@ -511,3 +608,57 @@ func (c *Controller) getEffectiveNamespace(namespaceFromEnvironment, namespaceFr return namespace } + +// GetReference of Postgres CR object +// i.e. required to emit events to this resource +func (c *Controller) GetReference(postgresql *acidv1.Postgresql) *v1.ObjectReference { + ref, err := reference.GetReference(scheme.Scheme, postgresql) + if err != nil { + c.logger.Errorf("could not get reference for Postgresql CR %v/%v: %v", postgresql.Namespace, postgresql.Name, err) + } + return ref +} + +func (c *Controller) meetsClusterDeleteAnnotations(postgresql *acidv1.Postgresql) error { + + deleteAnnotationDateKey := c.opConfig.DeleteAnnotationDateKey + currentTime := time.Now() + currentDate := currentTime.Format("2006-01-02") // go's reference date + + if deleteAnnotationDateKey != "" { + if deleteDate, ok := postgresql.Annotations[deleteAnnotationDateKey]; ok { + if deleteDate != currentDate { + return fmt.Errorf("annotation %s not matching the current date: got %s, expected %s", deleteAnnotationDateKey, deleteDate, currentDate) + } + } else { + return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationDateKey) + } + } + + deleteAnnotationNameKey := c.opConfig.DeleteAnnotationNameKey + + if deleteAnnotationNameKey != "" { + if clusterName, ok := postgresql.Annotations[deleteAnnotationNameKey]; ok { + if clusterName != postgresql.Name { + return fmt.Errorf("annotation %s not matching the cluster name: got %s, expected %s", deleteAnnotationNameKey, clusterName, postgresql.Name) + } + } else { + return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationNameKey) + } + } + + return nil +} + +// hasOwnership returns true if the controller is the "owner" of the postgresql. +// Whether it's owner is determined by the value of 'acid.zalan.do/controller' +// annotation. If the value matches the controllerID then it owns it, or if the +// controllerID is "" and there's no annotation set. 
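+// For example, an operator started with CONTROLLER_ID=green only acts on manifests annotated with "acid.zalan.do/controller: green"; clusters without the annotation are handled by the instance that has no controller ID.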
+func (c *Controller) hasOwnership(postgresql *acidv1.Postgresql) bool { + if postgresql.Annotations != nil { + if owner, ok := postgresql.Annotations[constants.PostgresqlControllerAnnotationKey]; ok { + return owner == c.controllerID + } + } + return c.controllerID == "" +} diff --git a/pkg/controller/node.go b/pkg/controller/node.go index 6f7befa27..2836b4f7f 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -1,11 +1,12 @@ package controller import ( + "context" "fmt" "time" "github.com/zalando/postgres-operator/pkg/util/retryutil" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -22,7 +23,7 @@ func (c *Controller) nodeListFunc(options metav1.ListOptions) (runtime.Object, e TimeoutSeconds: options.TimeoutSeconds, } - return c.KubeClient.Nodes().List(opts) + return c.KubeClient.Nodes().List(context.TODO(), opts) } func (c *Controller) nodeWatchFunc(options metav1.ListOptions) (watch.Interface, error) { @@ -32,7 +33,7 @@ func (c *Controller) nodeWatchFunc(options metav1.ListOptions) (watch.Interface, TimeoutSeconds: options.TimeoutSeconds, } - return c.KubeClient.Nodes().Watch(opts) + return c.KubeClient.Nodes().Watch(context.TODO(), opts) } func (c *Controller) nodeAdd(obj interface{}) { @@ -41,7 +42,7 @@ func (c *Controller) nodeAdd(obj interface{}) { return } - c.logger.Debugf("new node has been added: %q (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) + c.logger.Debugf("new node has been added: %s (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) // check if the node became not ready while the operator was down (otherwise we would have caught it in nodeUpdate) if !c.nodeIsReady(node) { @@ -75,7 +76,7 @@ func (c *Controller) nodeUpdate(prev, cur interface{}) { } func (c *Controller) nodeIsReady(node *v1.Node) bool { - return (!node.Spec.Unschedulable || util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel) || + return (!node.Spec.Unschedulable || (len(c.opConfig.NodeReadinessLabel) > 0 && util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel)) || util.MapContains(node.Labels, map[string]string{"master": "true"})) } @@ -87,7 +88,7 @@ func (c *Controller) attemptToMoveMasterPodsOffNode(node *v1.Node) error { opts := metav1.ListOptions{ LabelSelector: labels.Set(c.opConfig.ClusterLabels).String(), } - podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts) + podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(context.TODO(), opts) if err != nil { c.logger.Errorf("could not fetch list of the pods: %v", err) return err @@ -172,19 +173,19 @@ func (c *Controller) nodeDelete(obj interface{}) { } func (c *Controller) moveMasterPodsOffNode(node *v1.Node) { - + // retry to move master until configured timeout is reached err := retryutil.Retry(1*time.Minute, c.opConfig.MasterPodMoveTimeout, func() (bool, error) { err := c.attemptToMoveMasterPodsOffNode(node) if err != nil { - return false, fmt.Errorf("unable to move master pods off the unschedulable node; will retry after delay of 1 minute") + return false, err } return true, nil }, ) if err != nil { - c.logger.Warningf("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout) + c.logger.Warningf("failed to move master pods from the node %q: %v", node.Name, err) } } diff --git a/pkg/controller/node_test.go b/pkg/controller/node_test.go index c0ec78aa8..a9616e256 100644 --- 
a/pkg/controller/node_test.go +++ b/pkg/controller/node_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/zalando/postgres-operator/pkg/spec" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -13,10 +13,9 @@ const ( readyValue = "ready" ) -func initializeController() *Controller { - var c = NewController(&spec.ControllerConfig{}) - c.opConfig.NodeReadinessLabel = map[string]string{readyLabel: readyValue} - return c +func newNodeTestController() *Controller { + var controller = NewController(&spec.ControllerConfig{}, "node-test") + return controller } func makeNode(labels map[string]string, isSchedulable bool) *v1.Node { @@ -31,34 +30,65 @@ func makeNode(labels map[string]string, isSchedulable bool) *v1.Node { } } -var c = initializeController() +var nodeTestController = newNodeTestController() func TestNodeIsReady(t *testing.T) { testName := "TestNodeIsReady" var testTable = []struct { - in *v1.Node - out bool + in *v1.Node + out bool + readinessLabel map[string]string }{ { - in: makeNode(map[string]string{"foo": "bar"}, true), - out: true, + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar"}, false), - out: false, + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{readyLabel: readyValue}, false), - out: true, + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), - out: true, + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{}, }, } for _, tt := range testTable { - if isReady := c.nodeIsReady(tt.in); isReady != tt.out { - t.Errorf("%s: expected response %t doesn't match the actual %t for the node %#v", + nodeTestController.opConfig.NodeReadinessLabel = tt.readinessLabel + if isReady := nodeTestController.nodeIsReady(tt.in); isReady != tt.out { + t.Errorf("%s: expected response %t does not match the actual %t for the node %#v", testName, tt.out, isReady, tt.in) } } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 4b18a0902..8d5101167 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -1,18 +1,22 @@ package controller import ( + "context" "fmt" "time" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) { - config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(configObjectName, metav1.GetOptions{}) + config, err := c.KubeClient.OperatorConfigurationsGetter.OperatorConfigurations(configObjectNamespace).Get( + context.TODO(), configObjectName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err) } @@ -20,98 +24,138 @@ func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, con return config, nil } +func int32ToPointer(value int32) *int32 { + return &value +} + // importConfigurationFromCRD is a transitional function that converts CRD configuration to the one based on the configmap func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config { result := &config.Config{} // general config - result.EnableCRDValidation = fromCRD.EnableCRDValidation + result.EnableCRDValidation = util.CoalesceBool(fromCRD.EnableCRDValidation, util.True()) + result.EnableLazySpiloUpgrade = fromCRD.EnableLazySpiloUpgrade + result.EnablePgVersionEnvVar = fromCRD.EnablePgVersionEnvVar + result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat result.EtcdHost = fromCRD.EtcdHost - result.DockerImage = fromCRD.DockerImage - result.Workers = fromCRD.Workers + result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps + result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p2") + result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) result.MinInstances = fromCRD.MinInstances result.MaxInstances = fromCRD.MaxInstances - result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod) - result.RepairPeriod = time.Duration(fromCRD.RepairPeriod) + result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m") + result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m") result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit - result.ShmVolume = fromCRD.ShmVolume - result.Sidecars = fromCRD.Sidecars + result.ShmVolume = util.CoalesceBool(fromCRD.ShmVolume, util.True()) + result.SidecarImages = fromCRD.SidecarImages + result.SidecarContainers = fromCRD.SidecarContainers // user config - result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername - result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername + result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres") + result.ReplicationUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.ReplicationUsername, "standby") // kubernetes config result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations - result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName + result.PodServiceAccountName = util.Coalesce(fromCRD.Kubernetes.PodServiceAccountName, "postgres-pod") result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition result.PodServiceAccountRoleDefinition = fromCRD.Kubernetes.PodServiceAccountRoleDefinition result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap - result.PodTerminateGracePeriod = 
time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod) + result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret + result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m") result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged + result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser + result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup - result.ClusterDomain = fromCRD.Kubernetes.ClusterDomain + result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local") result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat - result.EnablePodDisruptionBudget = fromCRD.Kubernetes.EnablePodDisruptionBudget - result.EnableInitContainers = fromCRD.Kubernetes.EnableInitContainers - result.EnableSidecars = fromCRD.Kubernetes.EnableSidecars + result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True()) + result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc") + result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True()) + result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True()) result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName + result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName - result.PodRoleLabel = fromCRD.Kubernetes.PodRoleLabel - result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels + if fromCRD.Kubernetes.InfrastructureRolesDefs != nil { + result.InfrastructureRoles = []*config.InfrastructureRole{} + for _, secret := range fromCRD.Kubernetes.InfrastructureRolesDefs { + result.InfrastructureRoles = append( + result.InfrastructureRoles, + &config.InfrastructureRole{ + SecretName: secret.SecretName, + UserKey: secret.UserKey, + RoleKey: secret.RoleKey, + PasswordKey: secret.PasswordKey, + }) + } + } + + result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role") + result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"}) result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels - result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel + result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations + result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations + result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name") + result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey + result.DeleteAnnotationNameKey = fromCRD.Kubernetes.DeleteAnnotationNameKey result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName - result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy - result.MasterPodMoveTimeout = time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout) + result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready") + result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m") result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity - result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey + 
result.PodAntiAffinityTopologyKey = util.Coalesce(fromCRD.Kubernetes.PodAntiAffinityTopologyKey, "kubernetes.io/hostname") // Postgres Pod resources - result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest - result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest - result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit - result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit - result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit - result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit + result.DefaultCPURequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPURequest, "100m") + result.DefaultMemoryRequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryRequest, "100Mi") + result.DefaultCPULimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPULimit, "1") + result.DefaultMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryLimit, "500Mi") + result.MinCPULimit = util.Coalesce(fromCRD.PostgresPodResources.MinCPULimit, "250m") + result.MinMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.MinMemoryLimit, "250Mi") // timeout config - result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval) - result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout) - result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout) - result.PodDeletionWaitTimeout = time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout) - result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval) - result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout) + result.ResourceCheckInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ResourceCheckInterval), "3s") + result.ResourceCheckTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ResourceCheckTimeout), "10m") + result.PodLabelWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout), "10m") + result.PodDeletionWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout), "10m") + result.ReadyWaitInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitInterval), "4s") + result.ReadyWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitTimeout), "30s") // load balancer config - result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone + result.DbHostedZone = util.Coalesce(fromCRD.LoadBalancer.DbHostedZone, "db.example.com") result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat + result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster") // AWS or GCP config result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket result.AWSRegion = fromCRD.AWSGCP.AWSRegion result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole + result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket + result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount - result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath + result.AdditionalSecretMountPath = 
util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials") + result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration + result.EnableEBSGp3MigrationMaxSize = util.CoalesceInt64(fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize, 1000) // logical backup config - result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule - result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage + result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *") + result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.0") + result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3") result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey result.LogicalBackupS3SSE = fromCRD.LogicalBackup.S3SSE + result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials + result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-") // debug config result.DebugLogging = fromCRD.OperatorDebug.DebugLogging @@ -119,20 +163,22 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur // Teams API config result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI - result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl - result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration + result.TeamsAPIUrl = util.Coalesce(fromCRD.TeamsAPI.TeamsAPIUrl, "https://teams.example.com/api/") + result.TeamAPIRoleConfiguration = util.CoalesceStrMap(fromCRD.TeamsAPI.TeamAPIRoleConfiguration, map[string]string{"log_statement": "all"}) result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser result.EnableAdminRoleForUsers = fromCRD.TeamsAPI.EnableAdminRoleForUsers result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole - result.PamRoleName = fromCRD.TeamsAPI.PamRoleName - result.PamConfiguration = fromCRD.TeamsAPI.PamConfiguration - result.ProtectedRoles = fromCRD.TeamsAPI.ProtectedRoles + result.PamRoleName = util.Coalesce(fromCRD.TeamsAPI.PamRoleName, "zalandos") + result.PamConfiguration = util.Coalesce(fromCRD.TeamsAPI.PamConfiguration, "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees") + result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin"}) result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams + result.EnablePostgresTeamCRD = fromCRD.TeamsAPI.EnablePostgresTeamCRD + result.EnablePostgresTeamCRDSuperusers = fromCRD.TeamsAPI.EnablePostgresTeamCRDSuperusers // logging REST API config - result.APIPort = fromCRD.LoggingRESTAPI.APIPort - result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines - result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries + result.APIPort = util.CoalesceInt(fromCRD.LoggingRESTAPI.APIPort, 8080) + result.RingLogLines = util.CoalesceInt(fromCRD.LoggingRESTAPI.RingLogLines, 100) + result.ClusterHistoryEntries = util.CoalesceInt(fromCRD.LoggingRESTAPI.ClusterHistoryEntries, 1000) // Scalyr config result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey @@ -143,5 +189,56 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur 
result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit + // Connection pooler. Looks like we can't use defaulting in CRD before 1.17, + // so ensure default values here. + result.ConnectionPooler.NumberOfInstances = util.CoalesceInt32( + fromCRD.ConnectionPooler.NumberOfInstances, + int32ToPointer(2)) + + result.ConnectionPooler.NumberOfInstances = util.MaxInt32( + result.ConnectionPooler.NumberOfInstances, + int32ToPointer(2)) + + result.ConnectionPooler.Schema = util.Coalesce( + fromCRD.ConnectionPooler.Schema, + constants.ConnectionPoolerSchemaName) + + result.ConnectionPooler.User = util.Coalesce( + fromCRD.ConnectionPooler.User, + constants.ConnectionPoolerUserName) + + if result.ConnectionPooler.User == result.SuperUsername { + msg := "Connection pool user is not allowed to be the same as super user, username: %s" + panic(fmt.Errorf(msg, result.ConnectionPooler.User)) + } + + result.ConnectionPooler.Image = util.Coalesce( + fromCRD.ConnectionPooler.Image, + "registry.opensource.zalan.do/acid/pgbouncer") + + result.ConnectionPooler.Mode = util.Coalesce( + fromCRD.ConnectionPooler.Mode, + constants.ConnectionPoolerDefaultMode) + + result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = util.Coalesce( + fromCRD.ConnectionPooler.DefaultCPURequest, + constants.ConnectionPoolerDefaultCpuRequest) + + result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = util.Coalesce( + fromCRD.ConnectionPooler.DefaultMemoryRequest, + constants.ConnectionPoolerDefaultMemoryRequest) + + result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = util.Coalesce( + fromCRD.ConnectionPooler.DefaultCPULimit, + constants.ConnectionPoolerDefaultCpuLimit) + + result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = util.Coalesce( + fromCRD.ConnectionPooler.DefaultMemoryLimit, + constants.ConnectionPoolerDefaultMemoryLimit) + + result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32( + fromCRD.ConnectionPooler.MaxDBConnections, + int32ToPointer(constants.ConnectionPoolerMaxDBConnections)) + return result } diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index 27fd6c956..0defe88b1 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -1,7 +1,9 @@ package controller import ( - "k8s.io/api/core/v1" + "context" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" @@ -19,7 +21,7 @@ func (c *Controller) podListFunc(options metav1.ListOptions) (runtime.Object, er TimeoutSeconds: options.TimeoutSeconds, } - return c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts) + return c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(context.TODO(), opts) } func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, error) { @@ -29,7 +31,7 @@ func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, TimeoutSeconds: options.TimeoutSeconds, } - return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(opts) + return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(context.TODO(), opts) } func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event cluster.PodEvent) { diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index 6a850a339..e0bf4bdab 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -1,6 +1,8 @@ package controller import ( + "context" + "encoding/json" "fmt" "reflect" "strings" @@ -10,6 +12,7 @@ import ( 
"github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" @@ -40,12 +43,23 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) { // clusterListFunc obtains a list of all PostgreSQL clusters func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.PostgresqlList, error) { + var pgList acidv1.PostgresqlList + // TODO: use the SharedInformer cache instead of quering Kubernetes API directly. - list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(options) + list, err := c.KubeClient.PostgresqlsGetter.Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options) if err != nil { c.logger.Errorf("could not list postgresql objects: %v", err) } - return list, err + if c.controllerID != "" { + c.logger.Debugf("watch only clusters with controllerID %q", c.controllerID) + } + for _, pg := range list.Items { + if pg.Error == "" && c.hasOwnership(&pg) { + pgList.Items = append(pgList.Items, pg) + } + } + + return &pgList, err } // clusterListAndSync lists all manifests and decides whether to run the sync or repair. @@ -145,7 +159,7 @@ func (c *Controller) acquireInitialListOfClusters() error { } func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) *cluster.Cluster { - cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg) + cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg, c.eventRecorder) cl.Run(c.stopCh) teamName := strings.ToLower(cl.Spec.TeamID) @@ -211,11 +225,11 @@ func (c *Controller) processEvent(event ClusterEvent) { switch event.EventType { case EventAdd: if clusterFound { - lg.Debugf("cluster already exists") + lg.Infof("Recieved add event for already existing Postgres cluster") return } - lg.Infof("creation of the cluster started") + lg.Infof("creating a new Postgres cluster") cl = c.addCluster(lg, clusterName, event.NewSpec) @@ -224,6 +238,7 @@ func (c *Controller) processEvent(event ClusterEvent) { if err := cl.Create(); err != nil { cl.Error = fmt.Sprintf("could not create cluster: %v", err) lg.Error(cl.Error) + c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Create", "%v", cl.Error) return } @@ -262,6 +277,8 @@ func (c *Controller) processEvent(event ClusterEvent) { c.curWorkerCluster.Store(event.WorkerID, cl) cl.Delete() + // Fixme - no error handling for delete ? + // c.eventRecorder.Eventf(cl.GetReference, v1.EventTypeWarning, "Delete", "%v", cl.Error) func() { defer c.clustersMu.Unlock() @@ -292,6 +309,7 @@ func (c *Controller) processEvent(event ClusterEvent) { c.curWorkerCluster.Store(event.WorkerID, cl) if err := cl.Sync(event.NewSpec); err != nil { cl.Error = fmt.Sprintf("could not sync cluster: %v", err) + c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error) lg.Error(cl.Error) return } @@ -403,15 +421,42 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. 
clusterError = informerNewSpec.Error } + // only allow deletion if delete annotations are set and conditions are met + if eventType == EventDelete { + if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil { + c.logger.WithField("cluster-name", clusterName).Warnf( + "ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err) + c.logger.WithField("cluster-name", clusterName).Warnf( + "please, recreate Postgresql resource %q and set annotations to delete properly", clusterName) + if currentManifest, marshalErr := json.Marshal(informerOldSpec); marshalErr != nil { + c.logger.WithField("cluster-name", clusterName).Warnf("could not marshal current manifest:\n%+v", informerOldSpec) + } else { + c.logger.WithField("cluster-name", clusterName).Warnf("%s\n", string(currentManifest)) + } + return + } + } + if clusterError != "" && eventType != EventDelete { - c.logger. - WithField("cluster-name", clusterName). - Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) + c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) + + switch eventType { + case EventAdd: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError) + case EventUpdate: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError) + default: + c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed) + c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError) + } + return } // Don't pass the spec directly from the informer, since subsequent modifications of it would be reflected - // in the informer internal state, making it incohherent with the actual Kubernetes object (and, as a side + // in the informer internal state, making it incoherent with the actual Kubernetes object (and, as a side // effect, the modified state will be returned together with subsequent events). workerID := c.clusterWorkerID(clusterName) @@ -428,7 +473,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. if err := c.clusterEventQueues[workerID].Add(clusterEvent); err != nil { lg.Errorf("error while queueing cluster event: %v", clusterEvent) } - lg.Infof("%q event has been queued", eventType) + lg.Infof("%s event has been queued", eventType) if eventType != EventDelete { return @@ -449,47 +494,56 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. 
if err != nil { lg.Warningf("could not delete event from the queue: %v", err) } else { - lg.Debugf("event %q has been discarded for the cluster", evType) + lg.Debugf("event %s has been discarded for the cluster", evType) } } } func (c *Controller) postgresqlAdd(obj interface{}) { - pg, ok := obj.(*acidv1.Postgresql) - if !ok { - c.logger.Errorf("could not cast to postgresql spec") - return + pg := c.postgresqlCheck(obj) + if pg != nil { + // We will not get multiple Add events for the same cluster + c.queueClusterEvent(nil, pg, EventAdd) } - // We will not get multiple Add events for the same cluster - c.queueClusterEvent(nil, pg, EventAdd) + return } func (c *Controller) postgresqlUpdate(prev, cur interface{}) { - pgOld, ok := prev.(*acidv1.Postgresql) - if !ok { - c.logger.Errorf("could not cast to postgresql spec") - } - pgNew, ok := cur.(*acidv1.Postgresql) - if !ok { - c.logger.Errorf("could not cast to postgresql spec") - } - // Avoid the inifinite recursion for status updates - if reflect.DeepEqual(pgOld.Spec, pgNew.Spec) { - return + pgOld := c.postgresqlCheck(prev) + pgNew := c.postgresqlCheck(cur) + if pgOld != nil && pgNew != nil { + // Avoid the inifinite recursion for status updates + if reflect.DeepEqual(pgOld.Spec, pgNew.Spec) { + if reflect.DeepEqual(pgNew.Annotations, pgOld.Annotations) { + return + } + } + c.queueClusterEvent(pgOld, pgNew, EventUpdate) } - c.queueClusterEvent(pgOld, pgNew, EventUpdate) + return } func (c *Controller) postgresqlDelete(obj interface{}) { + pg := c.postgresqlCheck(obj) + if pg != nil { + c.queueClusterEvent(pg, nil, EventDelete) + } + + return +} + +func (c *Controller) postgresqlCheck(obj interface{}) *acidv1.Postgresql { pg, ok := obj.(*acidv1.Postgresql) if !ok { c.logger.Errorf("could not cast to postgresql spec") - return + return nil } - - c.queueClusterEvent(pg, nil, EventDelete) + if !c.hasOwnership(pg) { + return nil + } + return pg } /* @@ -525,15 +579,14 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error { func (c *Controller) createPodServiceAccount(namespace string) error { podServiceAccountName := c.opConfig.PodServiceAccountName - - _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{}) + _, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)) // get a separate copy of service account // to prevent a race condition when setting a namespace for many clusters sa := *c.PodServiceAccount - if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil { + if _, err = c.KubeClient.ServiceAccounts(namespace).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil { return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err) } c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, namespace) @@ -572,14 +625,14 @@ func (c *Controller) createRoleBindings(namespace string) error { podServiceAccountName := c.opConfig.PodServiceAccountName podServiceAccountRoleBindingName := c.PodServiceAccountRoleBinding.Name - _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{}) + _, err := c.KubeClient.RoleBindings(namespace).Get(context.TODO(), 
podServiceAccountRoleBindingName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace) // get a separate copy of role binding // to prevent a race condition when setting a namespace for many clusters rb := *c.PodServiceAccountRoleBinding - _, err = c.KubeClient.RoleBindings(namespace).Create(&rb) + _, err = c.KubeClient.RoleBindings(namespace).Create(context.TODO(), &rb, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err) } diff --git a/pkg/controller/postgresql_test.go b/pkg/controller/postgresql_test.go index 3d7785f92..71d23a264 100644 --- a/pkg/controller/postgresql_test.go +++ b/pkg/controller/postgresql_test.go @@ -1,10 +1,14 @@ package controller import ( - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" - "github.com/zalando/postgres-operator/pkg/spec" + "fmt" "reflect" "testing" + "time" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/spec" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( @@ -12,9 +16,55 @@ var ( False = false ) -func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { - c := NewController(&spec.ControllerConfig{}) +func newPostgresqlTestController() *Controller { + controller := NewController(&spec.ControllerConfig{}, "postgresql-test") + return controller +} +var postgresqlTestController = newPostgresqlTestController() + +func TestControllerOwnershipOnPostgresql(t *testing.T) { + tests := []struct { + name string + pg *acidv1.Postgresql + owned bool + error string + }{ + { + "Postgres cluster with defined ownership of mocked controller", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"acid.zalan.do/controller": "postgresql-test"}, + }, + }, + True, + "Postgres cluster should be owned by operator, but controller says no", + }, + { + "Postgres cluster with defined ownership of another controller", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{"acid.zalan.do/controller": "stups-test"}, + }, + }, + False, + "Postgres cluster should be owned by another operator, but controller say yes", + }, + { + "Test Postgres cluster without defined ownership", + &acidv1.Postgresql{}, + False, + "Postgres cluster should be owned by operator with empty controller ID, but controller says yes", + }, + } + for _, tt := range tests { + if postgresqlTestController.hasOwnership(tt.pg) != tt.owned { + t.Errorf("%s: %v", tt.name, tt.error) + } + } +} + +func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { tests := []struct { name string in *acidv1.PostgresSpec @@ -36,9 +86,94 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { }, } for _, tt := range tests { - result := c.mergeDeprecatedPostgreSQLSpecParameters(tt.in) + result := postgresqlTestController.mergeDeprecatedPostgreSQLSpecParameters(tt.in) if !reflect.DeepEqual(result, tt.out) { t.Errorf("%s: %v", tt.name, tt.error) } } } + +func TestMeetsClusterDeleteAnnotations(t *testing.T) { + // set delete annotations in configuration + postgresqlTestController.opConfig.DeleteAnnotationDateKey = "delete-date" + postgresqlTestController.opConfig.DeleteAnnotationNameKey = "delete-clustername" + + currentTime := time.Now() + today := 
currentTime.Format("2006-01-02") // go's reference date + clusterName := "acid-test-cluster" + + tests := []struct { + name string + pg *acidv1.Postgresql + error string + }{ + { + "Postgres cluster with matching delete annotations", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + "delete-clustername": clusterName, + }, + }, + }, + "", + }, + { + "Postgres cluster with violated delete date annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": "2020-02-02", + "delete-clustername": clusterName, + }, + }, + }, + fmt.Sprintf("annotation delete-date not matching the current date: got 2020-02-02, expected %s", today), + }, + { + "Postgres cluster with violated delete cluster name annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + "delete-clustername": "acid-minimal-cluster", + }, + }, + }, + fmt.Sprintf("annotation delete-clustername not matching the cluster name: got acid-minimal-cluster, expected %s", clusterName), + }, + { + "Postgres cluster with missing delete annotations", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{}, + }, + }, + "annotation delete-date not set in manifest to allow cluster deletion", + }, + { + "Postgres cluster with missing delete cluster name annotation", + &acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "delete-date": today, + }, + }, + }, + "annotation delete-clustername not set in manifest to allow cluster deletion", + }, + } + for _, tt := range tests { + if err := postgresqlTestController.meetsClusterDeleteAnnotations(tt.pg); err != nil { + if !reflect.DeepEqual(err.Error(), tt.error) { + t.Errorf("Expected error %q, got: %v", tt.error, err) + } + } + } +} diff --git a/pkg/controller/types.go b/pkg/controller/types.go index 0d86abec8..b598014c9 100644 --- a/pkg/controller/types.go +++ b/pkg/controller/types.go @@ -1,9 +1,10 @@ package controller import ( - "k8s.io/apimachinery/pkg/types" "time" + "k8s.io/apimachinery/pkg/types" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" ) diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 9b7dca063..815bc7b74 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -1,11 +1,13 @@ package controller import ( + "context" "encoding/json" "fmt" + "strings" v1 "k8s.io/api/core/v1" - apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -13,6 +15,8 @@ import ( acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/cluster" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/teams" + "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" "gopkg.in/yaml.v2" @@ -27,6 +31,7 @@ func (c *Controller) makeClusterConfig() cluster.Config { return cluster.Config{ RestConfig: c.config.RestConfig, OpConfig: config.Copy(c.opConfig), + PgTeamMap: c.pgTeamMap, InfrastructureRoles: 
infrastructureRoles, PodServiceAccount: c.PodServiceAccount, } @@ -49,8 +54,8 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 { return c.clusterWorkers[clusterName] } -func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error { - if _, err := c.KubeClient.CustomResourceDefinitions().Create(crd); err != nil { +func (c *Controller) createOperatorCRD(crd *apiextv1.CustomResourceDefinition) error { + if _, err := c.KubeClient.CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil { if k8sutil.ResourceAlreadyExists(err) { c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name) @@ -58,7 +63,8 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti if err != nil { return fmt.Errorf("could not marshal new customResourceDefintion: %v", err) } - if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil { + if _, err := c.KubeClient.CustomResourceDefinitions().Patch( + context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil { return fmt.Errorf("could not update customResourceDefinition: %v", err) } } else { @@ -69,19 +75,19 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti } return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) { - c, err := c.KubeClient.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if err != nil { return false, err } for _, cond := range c.Status.Conditions { switch cond.Type { - case apiextv1beta1.Established: - if cond.Status == apiextv1beta1.ConditionTrue { + case apiextv1.Established: + if cond.Status == apiextv1.ConditionTrue { return true, err } - case apiextv1beta1.NamesAccepted: - if cond.Status == apiextv1beta1.ConditionFalse { + case apiextv1.NamesAccepted: + if cond.Status == apiextv1.ConditionFalse { return false, fmt.Errorf("name conflict: %v", cond.Reason) } } @@ -107,61 +113,262 @@ func readDecodedRole(s string) (*spec.PgUser, error) { return &result, nil } -func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (map[string]spec.PgUser, error) { - if *rolesSecret == (spec.NamespacedName{}) { +var emptyName = (spec.NamespacedName{}) + +// Return information about what secrets we need to use to create +// infrastructure roles and in which format are they. This is done in +// compatible way, so that the previous logic is not changed, and handles both +// configuration in ConfigMap & CRD. +func (c *Controller) getInfrastructureRoleDefinitions() []*config.InfrastructureRole { + var roleDef config.InfrastructureRole + + // take from CRD configuration + rolesDefs := c.opConfig.InfrastructureRoles + + // check if we can extract something from the configmap config option + if c.opConfig.InfrastructureRolesDefs != "" { + // The configmap option could contain either a role description (in the + // form key1: value1, key2: value2), which has to be used together with + // an old secret name. 
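// As an illustration only (the option value below mirrors the fixtures in
// util_test.go further down; the surrounding option name and secret are not
// asserted here), such a configmap entry could look like:
//   "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role"
// The loop below splits such a string on "," into properties and on ":" into
// name/value pairs.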
+ + var secretName spec.NamespacedName + var err error + propertySep := "," + valueSep := ":" + + // The field contains the format in which secret is written, let's + // convert it to a proper definition + properties := strings.Split(c.opConfig.InfrastructureRolesDefs, propertySep) + roleDef = config.InfrastructureRole{Template: false} + + for _, property := range properties { + values := strings.Split(property, valueSep) + if len(values) < 2 { + continue + } + name := strings.TrimSpace(values[0]) + value := strings.TrimSpace(values[1]) + + switch name { + case "secretname": + if err = secretName.DecodeWorker(value, "default"); err != nil { + c.logger.Warningf("Could not marshal secret name %s: %v", value, err) + } else { + roleDef.SecretName = secretName + } + case "userkey": + roleDef.UserKey = value + case "passwordkey": + roleDef.PasswordKey = value + case "rolekey": + roleDef.RoleKey = value + case "defaultuservalue": + roleDef.DefaultUserValue = value + case "defaultrolevalue": + roleDef.DefaultRoleValue = value + default: + c.logger.Warningf("Role description is not known: %s", properties) + } + } + + if roleDef.SecretName != emptyName && + (roleDef.UserKey != "" || roleDef.DefaultUserValue != "") && + roleDef.PasswordKey != "" { + rolesDefs = append(rolesDefs, &roleDef) + } + } + + if c.opConfig.InfrastructureRolesSecretName != emptyName { + // At this point we deal with the old format, let's replicate it + // via existing definition structure and remember that it's just a + // template, the real values are in user1,password1,inrole1 etc. + rolesDefs = append(rolesDefs, &config.InfrastructureRole{ + SecretName: c.opConfig.InfrastructureRolesSecretName, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }) + } + + return rolesDefs +} + +func (c *Controller) getInfrastructureRoles( + rolesSecrets []*config.InfrastructureRole) ( + map[string]spec.PgUser, []error) { + + var errors []error + var noRolesProvided = true + + roles := []spec.PgUser{} + uniqRoles := map[string]spec.PgUser{} + + // To be compatible with the legacy implementation we need to return nil if + // the provided secret name is empty. The equivalent situation in the + // current implementation is an empty rolesSecrets slice or all its items + // are empty. + for _, role := range rolesSecrets { + if role.SecretName != emptyName { + noRolesProvided = false + } + } + + if noRolesProvided { + return nil, nil + } + + for _, secret := range rolesSecrets { + infraRoles, err := c.getInfrastructureRole(secret) + + if err != nil || infraRoles == nil { + c.logger.Debugf("Cannot get infrastructure role: %+v", *secret) + + if err != nil { + errors = append(errors, err) + } + + continue + } + + for _, r := range infraRoles { + roles = append(roles, r) + } + } + + for _, r := range roles { + if _, exists := uniqRoles[r.Name]; exists { + msg := "Conflicting infrastructure roles: roles[%s] = (%q, %q)" + c.logger.Debugf(msg, r.Name, uniqRoles[r.Name], r) + } + + uniqRoles[r.Name] = r + } + + return uniqRoles, errors +} + +// Generate list of users representing one infrastructure role based on its +// description in various K8S objects. An infrastructure role could be +// described by a secret and optionally a config map. The former should contain +// the secret information, i.e. username, password, role. The latter could +// contain an extensive description of the role and even override an +// information obtained from the secret (except a password). 
+// +// This function returns a list of users to be compatible with the previous +// behaviour, since we don't know how many users are actually encoded in the +// secret if it's a "template" role. If the provided role is not a template +// one, the result would be a list with just one user in it. +// +// FIXME: This dependency on two different objects is rather unnecessary +// complicated, so let's get rid of it via deprecation process. +func (c *Controller) getInfrastructureRole( + infraRole *config.InfrastructureRole) ( + []spec.PgUser, error) { + + rolesSecret := infraRole.SecretName + roles := []spec.PgUser{} + + if rolesSecret == emptyName { // we don't have infrastructure roles defined, bail out return nil, nil } infraRolesSecret, err := c.KubeClient. Secrets(rolesSecret.Namespace). - Get(rolesSecret.Name, metav1.GetOptions{}) + Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{}) if err != nil { - c.logger.Debugf("infrastructure roles secret name: %q", *rolesSecret) - return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err) + msg := "could not get infrastructure roles secret %s/%s: %v" + return nil, fmt.Errorf(msg, rolesSecret.Namespace, rolesSecret.Name, err) } secretData := infraRolesSecret.Data - result := make(map[string]spec.PgUser) -Users: - // in worst case we would have one line per user - for i := 1; i <= len(secretData); i++ { - properties := []string{"user", "password", "inrole"} - t := spec.PgUser{Origin: spec.RoleOriginInfrastructure} - for _, p := range properties { - key := fmt.Sprintf("%s%d", p, i) - if val, present := secretData[key]; !present { - if p == "user" { - // exit when the user name with the next sequence id is absent - break Users - } - } else { - s := string(val) - switch p { - case "user": - t.Name = s - case "password": - t.Password = s - case "inrole": - t.MemberOf = append(t.MemberOf, s) - default: - c.logger.Warningf("unknown key %q", p) - } + + if infraRole.Template { + Users: + for i := 1; i <= len(secretData); i++ { + properties := []string{ + infraRole.UserKey, + infraRole.PasswordKey, + infraRole.RoleKey, } - delete(secretData, key) + t := spec.PgUser{Origin: spec.RoleOriginInfrastructure} + for _, p := range properties { + key := fmt.Sprintf("%s%d", p, i) + if val, present := secretData[key]; !present { + if p == "user" { + // exit when the user name with the next sequence id is + // absent + break Users + } + } else { + s := string(val) + switch p { + case "user": + t.Name = s + case "password": + t.Password = s + case "inrole": + t.MemberOf = append(t.MemberOf, s) + default: + c.logger.Warningf("unknown key %q", p) + } + } + // XXX: This is a part of the original implementation, which is + // rather obscure. Why do we delete this key? Wouldn't it be + // used later in comparison for configmap? 
+ delete(secretData, key) + } + + if t.Valid() { + roles = append(roles, t) + } else { + msg := "infrastructure role %q is not complete and ignored" + c.logger.Warningf(msg, t) + } + } + } else { + roleDescr := &spec.PgUser{Origin: spec.RoleOriginInfrastructure} + + if details, exists := secretData[infraRole.Details]; exists { + if err := yaml.Unmarshal(details, &roleDescr); err != nil { + return nil, fmt.Errorf("could not decode yaml role: %v", err) + } + } else { + roleDescr.Name = util.Coalesce(string(secretData[infraRole.UserKey]), infraRole.DefaultUserValue) + roleDescr.Password = string(secretData[infraRole.PasswordKey]) + roleDescr.MemberOf = append(roleDescr.MemberOf, + util.Coalesce(string(secretData[infraRole.RoleKey]), infraRole.DefaultRoleValue)) } - if t.Name != "" { - if t.Password == "" { - c.logger.Warningf("infrastructure role %q has no password defined and is ignored", t.Name) - continue - } - result[t.Name] = t + if !roleDescr.Valid() { + msg := "infrastructure role %q is not complete and ignored" + c.logger.Warningf(msg, roleDescr) + + return nil, nil } + + if roleDescr.Name == "" { + msg := "infrastructure role %q has no name defined and is ignored" + c.logger.Warningf(msg, roleDescr.Name) + return nil, nil + } + + if roleDescr.Password == "" { + msg := "infrastructure role %q has no password defined and is ignored" + c.logger.Warningf(msg, roleDescr.Name) + return nil, nil + } + + roles = append(roles, *roleDescr) } - // perhaps we have some map entries with usernames, passwords, let's check if we have those users in the configmap - if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(rolesSecret.Name, metav1.GetOptions{}); err == nil { + // Now plot twist. We need to check if there is a configmap with the same + // name and extract a role description if it exists. + infraRolesMap, err := c.KubeClient. + ConfigMaps(rolesSecret.Namespace). + Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{}) + if err == nil { // we have a configmap with username - json description, let's read and decode it for role, s := range infraRolesMap.Data { roleDescr, err := readDecodedRole(s) @@ -179,20 +386,43 @@ Users: } roleDescr.Name = role roleDescr.Origin = spec.RoleOriginInfrastructure - result[role] = *roleDescr + roles = append(roles, *roleDescr) } } - if len(secretData) > 0 { - c.logger.Warningf("%d unprocessed entries in the infrastructure roles secret,"+ - " checking configmap %v", len(secretData), rolesSecret.Name) - c.logger.Info(`infrastructure role entries should be in the {key}{id} format,` + - ` where {key} can be either of "user", "password", "inrole" and the {id}` + - ` a monotonically increasing integer starting with 1`) - c.logger.Debugf("unprocessed entries: %#v", secretData) + // TODO: check for role collisions + return roles, nil +} + +func (c *Controller) loadPostgresTeams() { + // reset team map + c.pgTeamMap = teams.PostgresTeamMap{} + + pgTeams, err := c.KubeClient.PostgresTeamsGetter.PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + c.logger.Errorf("could not list postgres team objects: %v", err) } - return result, nil + c.pgTeamMap.Load(pgTeams) + c.logger.Debugf("Internal Postgres Team Cache: %#v", c.pgTeamMap) +} + +func (c *Controller) postgresTeamAdd(obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + } + c.logger.Debugf("PostgreTeam %q added. 
Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() +} + +func (c *Controller) postgresTeamUpdate(prev, obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + } + c.logger.Debugf("PostgreTeam %q updated. Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() } func (c *Controller) podClusterName(pod *v1.Pod) spec.NamespacedName { diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index a5d3c7ac5..edc05d67e 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -8,26 +8,31 @@ import ( b64 "encoding/base64" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( - testInfrastructureRolesSecretName = "infrastructureroles-test" + testInfrastructureRolesOldSecretName = "infrastructureroles-old-test" + testInfrastructureRolesNewSecretName = "infrastructureroles-new-test" ) -func newMockController() *Controller { - controller := NewController(&spec.ControllerConfig{}) +func newUtilTestController() *Controller { + controller := NewController(&spec.ControllerConfig{}, "util-test") controller.opConfig.ClusterNameLabel = "cluster-name" controller.opConfig.InfrastructureRolesSecretName = - spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName} + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + } controller.opConfig.Workers = 4 controller.KubeClient = k8sutil.NewMockKubernetesClient() return controller } -var mockController = newMockController() +var utilTestController = newUtilTestController() func TestPodClusterName(t *testing.T) { var testTable = []struct { @@ -43,7 +48,7 @@ func TestPodClusterName(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Namespace: v1.NamespaceDefault, Labels: map[string]string{ - mockController.opConfig.ClusterNameLabel: "testcluster", + utilTestController.opConfig.ClusterNameLabel: "testcluster", }, }, }, @@ -51,7 +56,7 @@ func TestPodClusterName(t *testing.T) { }, } for _, test := range testTable { - resp := mockController.podClusterName(test.in) + resp := utilTestController.podClusterName(test.in) if resp != test.expected { t.Errorf("expected response %v does not match the actual %v", test.expected, resp) } @@ -73,31 +78,39 @@ func TestClusterWorkerID(t *testing.T) { }, } for _, test := range testTable { - resp := mockController.clusterWorkerID(test.in) + resp := utilTestController.clusterWorkerID(test.in) if resp != test.expected { t.Errorf("expected response %v does not match the actual %v", test.expected, resp) } } } -func TestGetInfrastructureRoles(t *testing.T) { +// Test functionality of getting infrastructure roles from their description in +// corresponding secrets. Here we test only common stuff (e.g. when a secret do +// not exist, or empty) and the old format. 
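// A minimal sketch of the old "template" layout this test exercises (the secret
// name and values are illustrative; the key scheme is the one parsed by
// getInfrastructureRole):
//
//   kind: Secret
//   metadata:
//     name: postgresql-infrastructure-roles
//   stringData:
//     user1: testrole
//     password1: testpassword
//     inrole1: testinrole
//
// i.e. numbered user<N>/password<N>/inrole<N> triples, read until the first
// missing user<N> key.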
+func TestOldInfrastructureRoleFormat(t *testing.T) { var testTable = []struct { - secretName spec.NamespacedName - expectedRoles map[string]spec.PgUser - expectedError error + secretName spec.NamespacedName + expectedRoles map[string]spec.PgUser + expectedErrors []error }{ { + // empty secret name spec.NamespacedName{}, nil, nil, }, { + // secret does not exist spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: "null"}, - nil, - fmt.Errorf(`could not get infrastructure roles secret: NotFound`), + map[string]spec.PgUser{}, + []error{fmt.Errorf(`could not get infrastructure roles secret default/null: NotFound`)}, }, { - spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, map[string]spec.PgUser{ "testrole": { Name: "testrole", @@ -116,15 +129,354 @@ func TestGetInfrastructureRoles(t *testing.T) { }, } for _, test := range testTable { - roles, err := mockController.getInfrastructureRoles(&test.secretName) - if err != test.expectedError { - if err != nil && test.expectedError != nil && err.Error() == test.expectedError.Error() { - continue - } - t.Errorf("expected error '%v' does not match the actual error '%v'", test.expectedError, err) + roles, errors := utilTestController.getInfrastructureRoles( + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: test.secretName, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }) + + if len(errors) != len(test.expectedErrors) { + t.Errorf("expected error '%v' does not match the actual error '%v'", + test.expectedErrors, errors) } + + for idx := range errors { + err := errors[idx] + expectedErr := test.expectedErrors[idx] + + if err != expectedErr { + if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() { + continue + } + t.Errorf("expected error '%v' does not match the actual error '%v'", + expectedErr, err) + } + } + if !reflect.DeepEqual(roles, test.expectedRoles) { - t.Errorf("expected roles output %v does not match the actual %v", test.expectedRoles, roles) + t.Errorf("expected roles output %#v does not match the actual %#v", + test.expectedRoles, roles) + } + } +} + +// Test functionality of getting infrastructure roles from their description in +// corresponding secrets. Here we test the new format. 
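// A minimal sketch of the new, non-template layout, assuming the plain
// user/password/inrole keys used in the definitions built below: each secret
// describes exactly one role, e.g.
//
//   stringData:
//     user: new-test-role1
//     password: new-test-password1
//     inrole: new-test-inrole1
//
// and several such secrets can be listed as separate InfrastructureRole definitions.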
+func TestNewInfrastructureRoleFormat(t *testing.T) { + var testTable = []struct { + secrets []spec.NamespacedName + expectedRoles map[string]spec.PgUser + expectedErrors []error + }{ + // one secret with one configmap + { + []spec.NamespacedName{ + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + }, + map[string]spec.PgUser{ + "new-test-role": { + Name: "new-test-role", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password", + MemberOf: []string{"new-test-inrole"}, + }, + "new-foobar": { + Name: "new-foobar", + Origin: spec.RoleOriginInfrastructure, + Password: b64.StdEncoding.EncodeToString([]byte("password")), + MemberOf: nil, + Flags: []string{"createdb"}, + }, + }, + nil, + }, + // multiple standalone secrets + { + []spec.NamespacedName{ + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: "infrastructureroles-new-test1", + }, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: "infrastructureroles-new-test2", + }, + }, + map[string]spec.PgUser{ + "new-test-role1": { + Name: "new-test-role1", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password1", + MemberOf: []string{"new-test-inrole1"}, + }, + "new-test-role2": { + Name: "new-test-role2", + Origin: spec.RoleOriginInfrastructure, + Password: "new-test-password2", + MemberOf: []string{"new-test-inrole2"}, + }, + }, + nil, + }, + } + for _, test := range testTable { + definitions := []*config.InfrastructureRole{} + for _, secret := range test.secrets { + definitions = append(definitions, &config.InfrastructureRole{ + SecretName: secret, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: false, + }) + } + + roles, errors := utilTestController.getInfrastructureRoles(definitions) + if len(errors) != len(test.expectedErrors) { + t.Errorf("expected error does not match the actual error:\n%+v\n%+v", + test.expectedErrors, errors) + + // Stop and do not do any further checks + return + } + + for idx := range errors { + err := errors[idx] + expectedErr := test.expectedErrors[idx] + + if err != expectedErr { + if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() { + continue + } + t.Errorf("expected error '%v' does not match the actual error '%v'", + expectedErr, err) + } + } + + if !reflect.DeepEqual(roles, test.expectedRoles) { + t.Errorf("expected roles output/the actual:\n%#v\n%#v", + test.expectedRoles, roles) + } + } +} + +// Tests for getting correct infrastructure roles definitions from present +// configuration. E.g. in which secrets for which roles too look. The biggest +// point here is compatibility of old and new formats of defining +// infrastructure roles. 
+func TestInfrastructureRoleDefinitions(t *testing.T) { + var testTable = []struct { + rolesDefs []*config.InfrastructureRole + roleSecretName spec.NamespacedName + roleSecrets string + expectedDefs []*config.InfrastructureRole + }{ + // only new CRD format + { + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + spec.NamespacedName{}, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + }, + // only new configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + }, + // new configmap format with defaultRoleValue + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + DefaultRoleValue: "test-role", + Template: false, + }, + }, + }, + // only old CRD and configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // both formats for CRD + { + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + }, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + "", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // both formats for configmap + { + []*config.InfrastructureRole{}, + spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + 
"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{ + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesNewSecretName, + }, + UserKey: "test-user", + PasswordKey: "test-password", + RoleKey: "test-role", + Template: false, + }, + &config.InfrastructureRole{ + SecretName: spec.NamespacedName{ + Namespace: v1.NamespaceDefault, + Name: testInfrastructureRolesOldSecretName, + }, + UserKey: "user", + PasswordKey: "password", + RoleKey: "inrole", + Template: true, + }, + }, + }, + // incorrect configmap format + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "wrong-format", + []*config.InfrastructureRole{}, + }, + // configmap without a secret + { + []*config.InfrastructureRole{}, + spec.NamespacedName{}, + "userkey: test-user, passwordkey: test-password, rolekey: test-role", + []*config.InfrastructureRole{}, + }, + } + + for _, test := range testTable { + t.Logf("Test: %+v", test) + utilTestController.opConfig.InfrastructureRoles = test.rolesDefs + utilTestController.opConfig.InfrastructureRolesSecretName = test.roleSecretName + utilTestController.opConfig.InfrastructureRolesDefs = test.roleSecrets + + defs := utilTestController.getInfrastructureRoleDefinitions() + if len(defs) != len(test.expectedDefs) { + t.Errorf("expected definitions does not match the actual:\n%#v\n%#v", + test.expectedDefs, defs) + + // Stop and do not do any further checks + return + } + + for idx := range defs { + def := defs[idx] + expectedDef := test.expectedDefs[idx] + + if !reflect.DeepEqual(def, expectedDef) { + t.Errorf("expected definition/the actual:\n%#v\n%#v", + expectedDef, def) + } } } } diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index cb72ec50f..ab4a88735 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -65,7 +65,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go index 9ec677ac7..ae87609f6 100644 --- a/pkg/generated/clientset/versioned/doc.go +++ b/pkg/generated/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index 55771905f..6ae5db2d3 100644 --- 
a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go index 7c9574952..bc1c91a11 100644 --- a/pkg/generated/clientset/versioned/fake/doc.go +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 5363e8cc4..c4d383aab 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -35,7 +35,7 @@ import ( var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) + var localSchemeBuilder = runtime.SchemeBuilder{ acidv1.AddToScheme, } diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go index 02fd3d592..cd594164b 100644 --- a/pkg/generated/clientset/versioned/scheme/doc.go +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index 381948a4a..8be969eb5 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go index 1879b9514..5666201d4 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -33,6 +33,7 @@ import ( type AcidV1Interface interface { RESTClient() rest.Interface OperatorConfigurationsGetter + PostgresTeamsGetter PostgresqlsGetter } @@ -45,6 +46,10 @@ func (c *AcidV1Client) OperatorConfigurations(namespace string) OperatorConfigur return 
newOperatorConfigurations(c, namespace) } +func (c *AcidV1Client) PostgresTeams(namespace string) PostgresTeamInterface { + return newPostgresTeams(c, namespace) +} + func (c *AcidV1Client) Postgresqls(namespace string) PostgresqlInterface { return newPostgresqls(c, namespace) } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go index 55338c4de..eb8fcf1f4 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go index 1ae436a9b..c5fd1c04b 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go index 8cd4dc9da..03e7dda94 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -38,6 +38,10 @@ func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigu return &FakeOperatorConfigurations{c, namespace} } +func (c *FakeAcidV1) PostgresTeams(namespace string) v1.PostgresTeamInterface { + return &FakePostgresTeams{c, namespace} +} + func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface { return &FakePostgresqls{c, namespace} } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go index 732b48250..c03ea7d94 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,8 @@ SOFTWARE. 
package fake import ( + "context" + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -42,7 +44,7 @@ var operatorconfigurationsResource = schema.GroupVersionResource{Group: "acid.za var operatorconfigurationsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "OperatorConfiguration"} // Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any. -func (c *FakeOperatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { +func (c *FakeOperatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(operatorconfigurationsResource, c.ns, name), &acidzalandov1.OperatorConfiguration{}) diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go index 1ab20dbfc..01a0ed7a4 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,8 @@ SOFTWARE. package fake import ( + "context" + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" @@ -45,7 +47,7 @@ var postgresqlsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Ve var postgresqlsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "Postgresql"} // Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any. -func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) { +func (c *FakePostgresqls) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{}) @@ -56,7 +58,7 @@ func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidz } // List takes label and field selectors, and returns the list of Postgresqls that match those selectors. -func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) { +func (c *FakePostgresqls) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(postgresqlsResource, postgresqlsKind, c.ns, opts), &acidzalandov1.PostgresqlList{}) @@ -78,14 +80,14 @@ func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.Postg } // Watch returns a watch.Interface that watches the requested postgresqls. -func (c *FakePostgresqls) Watch(opts v1.ListOptions) (watch.Interface, error) { +func (c *FakePostgresqls) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. 
InvokesWatch(testing.NewWatchAction(postgresqlsResource, c.ns, opts)) } // Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any. -func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) { +func (c *FakePostgresqls) Create(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.CreateOptions) (result *acidzalandov1.Postgresql, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{}) @@ -96,7 +98,7 @@ func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result * } // Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any. -func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) { +func (c *FakePostgresqls) Update(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (result *acidzalandov1.Postgresql, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{}) @@ -108,7 +110,7 @@ func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result * // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*acidzalandov1.Postgresql, error) { +func (c *FakePostgresqls) UpdateStatus(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (*acidzalandov1.Postgresql, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(postgresqlsResource, "status", c.ns, postgresql), &acidzalandov1.Postgresql{}) @@ -119,7 +121,7 @@ func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*a } // Delete takes name of the postgresql and deletes it. Returns an error if one occurs. -func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error { +func (c *FakePostgresqls) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{}) @@ -127,15 +129,15 @@ func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *FakePostgresqls) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOptions) +func (c *FakePostgresqls) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOpts) _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresqlList{}) return err } // Patch applies the patch and returns the patched postgresql. -func (c *FakePostgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *acidzalandov1.Postgresql, err error) { +func (c *FakePostgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.Postgresql, err error) { obj, err := c.Fake. 
Invokes(testing.NewPatchSubresourceAction(postgresqlsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.Postgresql{}) diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go new file mode 100644 index 000000000..b333ae046 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go @@ -0,0 +1,136 @@ +/* +Copyright 2021 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePostgresTeams implements PostgresTeamInterface +type FakePostgresTeams struct { + Fake *FakeAcidV1 + ns string +} + +var postgresteamsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresteams"} + +var postgresteamsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "PostgresTeam"} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *FakePostgresTeams) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *FakePostgresTeams) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresTeamList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(postgresteamsResource, postgresteamsKind, c.ns, opts), &acidzalandov1.PostgresTeamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &acidzalandov1.PostgresTeamList{ListMeta: obj.(*acidzalandov1.PostgresTeamList).ListMeta} + for _, item := range obj.(*acidzalandov1.PostgresTeamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *FakePostgresTeams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(postgresteamsResource, c.ns, opts)) + +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Create(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.CreateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Update(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.UpdateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *FakePostgresTeams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePostgresTeams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(postgresteamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresTeamList{}) + return err +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *FakePostgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(postgresteamsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go index fd5707c75..b4e99cbc8 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26,4 +26,6 @@ package v1 type OperatorConfigurationExpansion interface{} +type PostgresTeamExpansion interface{} + type PostgresqlExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go index e9cc0de77..be22e075d 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,8 @@ SOFTWARE. package v1 import ( + "context" + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,7 +41,7 @@ type OperatorConfigurationsGetter interface { // OperatorConfigurationInterface has methods to work with OperatorConfiguration resources. type OperatorConfigurationInterface interface { - Get(name string, options v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error) OperatorConfigurationExpansion } @@ -58,14 +60,14 @@ func newOperatorConfigurations(c *AcidV1Client, namespace string) *operatorConfi } // Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any. -func (c *operatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { +func (c *operatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { result = &acidzalandov1.OperatorConfiguration{} err = c.client.Get(). Namespace(c.ns). Resource("operatorconfigurations"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). 
Into(result) return } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go index 78c0fc390..5241cfb54 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,7 @@ SOFTWARE. package v1 import ( + "context" "time" v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -43,15 +44,15 @@ type PostgresqlsGetter interface { // PostgresqlInterface has methods to work with Postgresql resources. type PostgresqlInterface interface { - Create(*v1.Postgresql) (*v1.Postgresql, error) - Update(*v1.Postgresql) (*v1.Postgresql, error) - UpdateStatus(*v1.Postgresql) (*v1.Postgresql, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Postgresql, error) - List(opts metav1.ListOptions) (*v1.PostgresqlList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) + Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (*v1.Postgresql, error) + Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error) + UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Postgresql, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresqlList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error) PostgresqlExpansion } @@ -70,20 +71,20 @@ func newPostgresqls(c *AcidV1Client, namespace string) *postgresqls { } // Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any. -func (c *postgresqls) Get(name string, options metav1.GetOptions) (result *v1.Postgresql, err error) { +func (c *postgresqls) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Postgresql, err error) { result = &v1.Postgresql{} err = c.client.Get(). Namespace(c.ns). Resource("postgresqls"). Name(name). VersionedParams(&options, scheme.ParameterCodec). - Do(). + Do(ctx). Into(result) return } // List takes label and field selectors, and returns the list of Postgresqls that match those selectors. 
-func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList, err error) { +func (c *postgresqls) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresqlList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -94,13 +95,13 @@ func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList, Resource("postgresqls"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Do(). + Do(ctx). Into(result) return } // Watch returns a watch.Interface that watches the requested postgresqls. -func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) { +func (c *postgresqls) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -111,87 +112,90 @@ func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) { Resource("postgresqls"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). - Watch() + Watch(ctx) } // Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any. -func (c *postgresqls) Create(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { +func (c *postgresqls) Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (result *v1.Postgresql, err error) { result = &v1.Postgresql{} err = c.client.Post(). Namespace(c.ns). Resource("postgresqls"). + VersionedParams(&opts, scheme.ParameterCodec). Body(postgresql). - Do(). + Do(ctx). Into(result) return } // Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any. -func (c *postgresqls) Update(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { +func (c *postgresqls) Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) { result = &v1.Postgresql{} err = c.client.Put(). Namespace(c.ns). Resource("postgresqls"). Name(postgresql.Name). + VersionedParams(&opts, scheme.ParameterCodec). Body(postgresql). - Do(). + Do(ctx). Into(result) return } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *postgresqls) UpdateStatus(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { +func (c *postgresqls) UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) { result = &v1.Postgresql{} err = c.client.Put(). Namespace(c.ns). Resource("postgresqls"). Name(postgresql.Name). SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). Body(postgresql). - Do(). + Do(ctx). Into(result) return } // Delete takes name of the postgresql and deletes it. Returns an error if one occurs. -func (c *postgresqls) Delete(name string, options *metav1.DeleteOptions) error { +func (c *postgresqls) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("postgresqls"). Name(name). - Body(options). - Do(). + Body(&opts). + Do(ctx). Error() } // DeleteCollection deletes a collection of objects. 
-func (c *postgresqls) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { +func (c *postgresqls) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second } return c.client.Delete(). Namespace(c.ns). Resource("postgresqls"). - VersionedParams(&listOptions, scheme.ParameterCodec). + VersionedParams(&listOpts, scheme.ParameterCodec). Timeout(timeout). - Body(options). - Do(). + Body(&opts). + Do(ctx). Error() } // Patch applies the patch and returns the patched postgresql. -func (c *postgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) { +func (c *postgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error) { result = &v1.Postgresql{} err = c.client.Patch(pt). Namespace(c.ns). Resource("postgresqls"). - SubResource(subresources...). Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). Body(data). - Do(). + Do(ctx). Into(result) return } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..96fbb882a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,184 @@ +/* +Copyright 2021 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PostgresTeamsGetter has a method to return a PostgresTeamInterface. +// A group's client should implement this interface. +type PostgresTeamsGetter interface { + PostgresTeams(namespace string) PostgresTeamInterface +} + +// PostgresTeamInterface has methods to work with PostgresTeam resources. 
+type PostgresTeamInterface interface { + Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (*v1.PostgresTeam, error) + Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (*v1.PostgresTeam, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PostgresTeam, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresTeamList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) + PostgresTeamExpansion +} + +// postgresTeams implements PostgresTeamInterface +type postgresTeams struct { + client rest.Interface + ns string +} + +// newPostgresTeams returns a PostgresTeams +func newPostgresTeams(c *AcidV1Client, namespace string) *postgresTeams { + return &postgresTeams{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *postgresTeams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *postgresTeams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresTeamList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PostgresTeamList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *postgresTeams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *postgresTeams) Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Post(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. 
+func (c *postgresTeams) Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Put(). + Namespace(c.ns). + Resource("postgresteams"). + Name(postgresTeam.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *postgresTeams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *postgresTeams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *postgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go index 4ff4a3d06..6f77564fa 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go index 30090afee..5c05e6d68 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -30,6 +30,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // PostgresTeams returns a PostgresTeamInformer. + PostgresTeams() PostgresTeamInformer // Postgresqls returns a PostgresqlInformer. Postgresqls() PostgresqlInformer } @@ -45,6 +47,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// PostgresTeams returns a PostgresTeamInformer. 
+func (v *version) PostgresTeams() PostgresTeamInformer { + return &postgresTeamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Postgresqls returns a PostgresqlInformer. func (v *version) Postgresqls() PostgresqlInformer { return &postgresqlInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go index da7f91669..1453af276 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -25,6 +25,7 @@ SOFTWARE. package v1 import ( + "context" time "time" acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -67,13 +68,13 @@ func NewFilteredPostgresqlInformer(client versioned.Interface, namespace string, if tweakListOptions != nil { tweakListOptions(&options) } - return client.AcidV1().Postgresqls(namespace).List(options) + return client.AcidV1().Postgresqls(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.AcidV1().Postgresqls(namespace).Watch(options) + return client.AcidV1().Postgresqls(namespace).Watch(context.TODO(), options) }, }, &acidzalandov1.Postgresql{}, diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..a19e4726f --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,96 @@ +/* +Copyright 2021 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PostgresTeamInformer provides access to a shared informer and lister for +// PostgresTeams. +type PostgresTeamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PostgresTeamLister +} + +type postgresTeamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).Watch(context.TODO(), options) + }, + }, + &acidzalandov1.PostgresTeam{}, + resyncPeriod, + indexers, + ) +} + +func (f *postgresTeamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *postgresTeamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acidzalandov1.PostgresTeam{}, f.defaultInformer) +} + +func (f *postgresTeamInformer) Lister() v1.PostgresTeamLister { + return v1.NewPostgresTeamLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index 4e6b36614..e4b1efdc6 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, 
to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 562dec419..5fd693558 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -59,6 +59,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=acid.zalan.do, Version=v1 + case v1.SchemeGroupVersion.WithResource("postgresteams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().PostgresTeams().Informer()}, nil case v1.SchemeGroupVersion.WithResource("postgresqls"): return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().Postgresqls().Informer()}, nil diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index 9f4e14a1a..6d1b334bf 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go index 1b96a7c76..cc3e578b2 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -24,6 +24,14 @@ SOFTWARE. package v1 +// PostgresTeamListerExpansion allows custom methods to be added to +// PostgresTeamLister. +type PostgresTeamListerExpansion interface{} + +// PostgresTeamNamespaceListerExpansion allows custom methods to be added to +// PostgresTeamNamespaceLister. +type PostgresTeamNamespaceListerExpansion interface{} + // PostgresqlListerExpansion allows custom methods to be added to // PostgresqlLister. type PostgresqlListerExpansion interface{} diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go index 9a60c8281..d2258bd01 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Compose, Zalando SE +Copyright 2021 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -32,8 +32,10 @@ import ( ) // PostgresqlLister helps list Postgresqls. +// All objects returned here must be treated as read-only. 
type PostgresqlLister interface { // List lists all Postgresqls in the indexer. + // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Postgresqls returns an object that can list and get Postgresqls. Postgresqls(namespace string) PostgresqlNamespaceLister @@ -64,10 +66,13 @@ func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceList } // PostgresqlNamespaceLister helps list and get Postgresqls. +// All objects returned here must be treated as read-only. type PostgresqlNamespaceLister interface { // List lists all Postgresqls in the indexer for a given namespace. + // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Get retrieves the Postgresql from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. Get(name string) (*v1.Postgresql, error) PostgresqlNamespaceListerExpansion } diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..38073e92d --- /dev/null +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,105 @@ +/* +Copyright 2021 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PostgresTeamLister helps list PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamLister interface { + // List lists all PostgresTeams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // PostgresTeams returns an object that can list and get PostgresTeams. + PostgresTeams(namespace string) PostgresTeamNamespaceLister + PostgresTeamListerExpansion +} + +// postgresTeamLister implements the PostgresTeamLister interface. +type postgresTeamLister struct { + indexer cache.Indexer +} + +// NewPostgresTeamLister returns a new PostgresTeamLister. +func NewPostgresTeamLister(indexer cache.Indexer) PostgresTeamLister { + return &postgresTeamLister{indexer: indexer} +} + +// List lists all PostgresTeams in the indexer. 
+func (s *postgresTeamLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// PostgresTeams returns an object that can list and get PostgresTeams. +func (s *postgresTeamLister) PostgresTeams(namespace string) PostgresTeamNamespaceLister { + return postgresTeamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PostgresTeamNamespaceLister helps list and get PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamNamespaceLister interface { + // List lists all PostgresTeams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // Get retrieves the PostgresTeam from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PostgresTeam, error) + PostgresTeamNamespaceListerExpansion +} + +// postgresTeamNamespaceLister implements the PostgresTeamNamespaceLister +// interface. +type postgresTeamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PostgresTeams in the indexer for a given namespace. +func (s postgresTeamNamespaceLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// Get retrieves the PostgresTeam from the indexer for a given namespace and name. +func (s postgresTeamNamespaceLister) Get(name string) (*v1.PostgresTeam, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("postgresteam"), name) + } + return obj.(*v1.PostgresTeam), nil +} diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 3e6bec8db..78c79e1b3 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -23,13 +23,16 @@ const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespa // RoleOrigin contains the code of the origin of a role type RoleOrigin int -// The rolesOrigin constant values must be sorted by the role priority for resolveNameConflict(...) to work. +// The rolesOrigin constant values must be sorted by the role priority for +// resolveNameConflict(...) to work. const ( RoleOriginUnknown RoleOrigin = iota RoleOriginManifest RoleOriginInfrastructure RoleOriginTeamsAPI RoleOriginSystem + RoleOriginBootstrap + RoleConnectionPooler ) type syncUserOperation int @@ -52,6 +55,10 @@ type PgUser struct { AdminRole string `yaml:"admin_role"` } +func (user *PgUser) Valid() bool { + return user.Name != "" && user.Password != "" +} + // PgUserMap maps user names to the definitions. 
type PgUserMap map[string]PgUser @@ -107,6 +114,8 @@ type ControllerConfig struct { CRDReadyWaitTimeout time.Duration ConfigMapName NamespacedName Namespace string + + EnableJsonLogging bool } // cached value for the GetOperatorNamespace @@ -178,6 +187,10 @@ func (r RoleOrigin) String() string { return "teams API role" case RoleOriginSystem: return "system role" + case RoleOriginBootstrap: + return "bootstrapped role" + case RoleConnectionPooler: + return "connection pooler role" default: panic(fmt.Sprintf("bogus role origin value %d", r)) } diff --git a/pkg/teams/postgres_team.go b/pkg/teams/postgres_team.go new file mode 100644 index 000000000..7fb725765 --- /dev/null +++ b/pkg/teams/postgres_team.go @@ -0,0 +1,118 @@ +package teams + +import ( + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +type PostgresTeamMap map[string]postgresTeamMembership + +type postgresTeamMembership struct { + AdditionalSuperuserTeams []string + AdditionalTeams []string + AdditionalMembers []string +} + +type teamHashSet map[string]map[string]struct{} + +func (ths *teamHashSet) has(team string) bool { + _, ok := (*ths)[team] + return ok +} + +func (ths *teamHashSet) add(newTeam string, newSet []string) { + set := make(map[string]struct{}) + if ths.has(newTeam) { + set = (*ths)[newTeam] + } + for _, t := range newSet { + set[t] = struct{}{} + } + (*ths)[newTeam] = set +} + +func (ths *teamHashSet) toMap() map[string][]string { + newTeamMap := make(map[string][]string) + for team, items := range *ths { + list := []string{} + for item := range items { + list = append(list, item) + } + newTeamMap[team] = list + } + return newTeamMap +} + +func (ths *teamHashSet) mergeCrdMap(crdTeamMap map[string][]string) { + for t, at := range crdTeamMap { + ths.add(t, at) + } +} + +func fetchTeams(teamset *map[string]struct{}, set teamHashSet) { + for key := range set { + (*teamset)[key] = struct{}{} + } +} + +func (ptm *PostgresTeamMap) fetchAdditionalTeams(team string, superuserTeams bool, transitive bool, exclude []string) []string { + + var teams []string + + if superuserTeams { + teams = (*ptm)[team].AdditionalSuperuserTeams + } else { + teams = (*ptm)[team].AdditionalTeams + } + if transitive { + exclude = append(exclude, team) + for _, additionalTeam := range teams { + if !(util.SliceContains(exclude, additionalTeam)) { + transitiveTeams := (*ptm).fetchAdditionalTeams(additionalTeam, superuserTeams, transitive, exclude) + for _, transitiveTeam := range transitiveTeams { + if !(util.SliceContains(exclude, transitiveTeam)) && !(util.SliceContains(teams, transitiveTeam)) { + teams = append(teams, transitiveTeam) + } + } + } + } + } + + return teams +} + +// GetAdditionalTeams function to retrieve list of additional teams +func (ptm *PostgresTeamMap) GetAdditionalTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, false, transitive, []string{}) +} + +// GetAdditionalSuperuserTeams function to retrieve list of additional superuser teams +func (ptm *PostgresTeamMap) GetAdditionalSuperuserTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, true, transitive, []string{}) +} + +// Load function to import data from PostgresTeam CRD +func (ptm *PostgresTeamMap) Load(pgTeams *acidv1.PostgresTeamList) { + superuserTeamSet := teamHashSet{} + teamSet := teamHashSet{} + teamMemberSet := teamHashSet{} + 
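// teamIDs collects every team that appears as a key in any of the three maps merged below; each of those teams ends up with exactly one combined membership entry. + 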
teamIDs := make(map[string]struct{}) + + for _, pgTeam := range pgTeams.Items { + superuserTeamSet.mergeCrdMap(pgTeam.Spec.AdditionalSuperuserTeams) + teamSet.mergeCrdMap(pgTeam.Spec.AdditionalTeams) + teamMemberSet.mergeCrdMap(pgTeam.Spec.AdditionalMembers) + } + fetchTeams(&teamIDs, superuserTeamSet) + fetchTeams(&teamIDs, teamSet) + fetchTeams(&teamIDs, teamMemberSet) + + for teamID := range teamIDs { + (*ptm)[teamID] = postgresTeamMembership{ + AdditionalSuperuserTeams: util.CoalesceStrArr(superuserTeamSet.toMap()[teamID], []string{}), + AdditionalTeams: util.CoalesceStrArr(teamSet.toMap()[teamID], []string{}), + AdditionalMembers: util.CoalesceStrArr(teamMemberSet.toMap()[teamID], []string{}), + } + } +} diff --git a/pkg/teams/postgres_team_test.go b/pkg/teams/postgres_team_test.go new file mode 100644 index 000000000..f8c3a21d8 --- /dev/null +++ b/pkg/teams/postgres_team_test.go @@ -0,0 +1,194 @@ +package teams + +import ( + "testing" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + True = true + False = false + pgTeamList = acidv1.PostgresTeamList{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: []acidv1.PostgresTeam{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamAB", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamA": []string{"teamB", "team24x7"}, "teamB": []string{"teamA", "teamC", "team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamB": []string{}}, + AdditionalMembers: map[string][]string{"team24x7": []string{"optimusprime"}, "teamB": []string{"drno"}}, + }, + }, { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamC", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamC": []string{"team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamC": []string{"teamA", "teamB", "acid"}}, + AdditionalMembers: map[string][]string{"acid": []string{"batman"}}, + }, + }, + }, + } +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +func TestLoadingPostgresTeamCRD(t *testing.T) { + tests := []struct { + name string + crd acidv1.PostgresTeamList + ptm PostgresTeamMap + error string + }{ + { + "Check that CRD is imported correctly into the internal format", + pgTeamList, + PostgresTeamMap{ + "teamA": { + AdditionalSuperuserTeams: []string{"teamB", "team24x7"}, + AdditionalTeams: []string{"teamC"}, + AdditionalMembers: []string{}, + }, + "teamB": { + AdditionalSuperuserTeams: []string{"teamA", "teamC", "team24x7"}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"drno"}, + }, + "teamC": { + AdditionalSuperuserTeams: []string{"team24x7"}, + AdditionalTeams: []string{"teamA", "teamB", "acid"}, + AdditionalMembers: []string{}, + }, + "team24x7": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"optimusprime"}, + }, + "acid": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"batman"}, + }, + }, + "Mismatch between PostgresTeam CRD and internal map", + }, + } + + for _, tt := range tests { + postgresTeamMap := PostgresTeamMap{} + 
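// Load the fixture CRD list into a fresh map and compare each team's memberships with the expected result, ignoring element order. + 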
postgresTeamMap.Load(&tt.crd) + for team, ptmeamMembership := range postgresTeamMap { + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalSuperuserTeams, tt.ptm[team].AdditionalSuperuserTeams) { + t.Errorf("%s: %v: expected additional members %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalTeams, tt.ptm[team].AdditionalTeams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalMembers, tt.ptm[team].AdditionalMembers) { + t.Errorf("%s: %v: expected additional superuser teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + } + } +} + +// TestGetAdditionalTeams if returns teams with and without transitive dependencies +func TestGetAdditionalTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional teams are returned", + "teamA", + false, + []string{"teamC"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that additional teams are returned incl. transitive teams", + "teamA", + true, + []string{"teamC", "teamB", "acid"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that empty list is returned", + "teamB", + false, + []string{}, + "GetAdditionalTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} + +// TestGetAdditionalSuperuserTeams if returns teams with and without transitive dependencies +func TestGetAdditionalSuperuserTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional superuser teams are returned", + "teamA", + false, + []string{"teamB", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that additional superuser teams are returned incl. 
transitive superuser teams", + "teamA", + true, + []string{"teamB", "teamC", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that empty list is returned", + "team24x7", + false, + []string{}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalSuperuserTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index c4121ae8e..107ea9360 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -8,6 +8,8 @@ import ( "fmt" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/util/constants" + v1 "k8s.io/api/core/v1" ) // CRD describes CustomResourceDefinition specific configuration parameters @@ -21,43 +23,79 @@ type CRD struct { // Resources describes kubernetes resource specific configuration parameters type Resources struct { - ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"` - ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"` - PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` - PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` - PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` - SpiloFSGroup *int64 `name:"spilo_fsgroup"` - PodPriorityClassName string `name:"pod_priority_class_name"` - ClusterDomain string `name:"cluster_domain" default:"cluster.local"` - SpiloPrivileged bool `name:"spilo_privileged" default:"false"` - ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` - InheritedLabels []string `name:"inherited_labels" default:""` - ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` - PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` - PodToleration map[string]string `name:"toleration" default:""` - DefaultCPURequest string `name:"default_cpu_request" default:"100m"` - DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"` - DefaultCPULimit string `name:"default_cpu_limit" default:"1"` - DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"` - MinCPULimit string `name:"min_cpu_limit" default:"250m"` - MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"` - PodEnvironmentConfigMap string `name:"pod_environment_configmap" default:""` - NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` - MaxInstances int32 `name:"max_instances" default:"-1"` - MinInstances int32 `name:"min_instances" default:"-1"` - ShmVolume *bool `name:"enable_shm_volume" default:"true"` + ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"` + ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"` + PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` + PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` + PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` + SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"` + SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` + SpiloFSGroup *int64 
`name:"spilo_fsgroup"` + PodPriorityClassName string `name:"pod_priority_class_name"` + ClusterDomain string `name:"cluster_domain" default:"cluster.local"` + SpiloPrivileged bool `name:"spilo_privileged" default:"false"` + ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` + InheritedLabels []string `name:"inherited_labels" default:""` + InheritedAnnotations []string `name:"inherited_annotations" default:""` + DownscalerAnnotations []string `name:"downscaler_annotations"` + ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` + DeleteAnnotationDateKey string `name:"delete_annotation_date_key"` + DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` + PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` + PodToleration map[string]string `name:"toleration" default:""` + DefaultCPURequest string `name:"default_cpu_request" default:"100m"` + DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"` + DefaultCPULimit string `name:"default_cpu_limit" default:"1"` + DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"` + MinCPULimit string `name:"min_cpu_limit" default:"250m"` + MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"` + PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"` + PodEnvironmentSecret string `name:"pod_environment_secret"` + NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` + MaxInstances int32 `name:"max_instances" default:"-1"` + MinInstances int32 `name:"min_instances" default:"-1"` + ShmVolume *bool `name:"enable_shm_volume" default:"true"` +} + +type InfrastructureRole struct { + // Name of a secret which describes the role, and optionally name of a + // configmap with an extra information + SecretName spec.NamespacedName + + UserKey string + PasswordKey string + RoleKey string + + DefaultUserValue string + DefaultRoleValue string + + // This field point out the detailed yaml definition of the role, if exists + Details string + + // Specify if a secret contains multiple fields in the following format: + // + // %(userkey)idx: ... + // %(passwordkey)idx: ... + // %(rolekey)idx: ... + // + // If it does, Name/Password/Role are interpreted not as unique field + // names, but as a template. 
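+ // + // For example (key names and values below are purely illustrative), with + // UserKey "user", PasswordKey "password", RoleKey "inrole" and Template + // set to true, a secret containing the keys + // + //   user1: dbowner + //   password1: secret1 + //   inrole1: admin + //   user2: robot + //   password2: secret2 + //   inrole2: robot_zmon + // + // would be expanded into two separate infrastructure roles.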
+ + Template bool } // Auth describes authentication specific configuration parameters type Auth struct { - SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` - PamRoleName string `name:"pam_role_name" default:"zalandos"` - PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` - TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` - OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"` - InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"` - SuperUsername string `name:"super_username" default:"postgres"` - ReplicationUsername string `name:"replication_username" default:"standby"` + SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` + PamRoleName string `name:"pam_role_name" default:"zalandos"` + PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` + TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` + OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"` + InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"` + InfrastructureRoles []*InfrastructureRole `name:"-"` + InfrastructureRolesDefs string `name:"infrastructure_roles_secrets"` + SuperUsername string `name:"super_username" default:"postgres"` + ReplicationUsername string `name:"replication_username" default:"standby"` } // Scalyr holds the configuration for the Scalyr Agent sidecar for log shipping: @@ -73,14 +111,31 @@ type Scalyr struct { // LogicalBackup defines configuration for logical backup type LogicalBackup struct { - LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` - LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"` - LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` - LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` - LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` - LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` - LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` - LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:"AES256"` + LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` + LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.0"` + LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` + LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` + LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` + LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` + LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` + LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""` + LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:""` + LogicalBackupGoogleApplicationCredentials string 
`name:"logical_backup_google_application_credentials" default:""` + LogicalBackupJobPrefix string `name:"logical_backup_job_prefix" default:"logical-backup-"` +} + +// Operator options for connection pooler +type ConnectionPooler struct { + NumberOfInstances *int32 `name:"connection_pooler_number_of_instances" default:"2"` + Schema string `name:"connection_pooler_schema" default:"pooler"` + User string `name:"connection_pooler_user" default:"pooler"` + Image string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"` + Mode string `name:"connection_pooler_mode" default:"transaction"` + MaxDBConnections *int32 `name:"connection_pooler_max_db_connections" default:"60"` + ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request" default:"500m"` + ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request" default:"100Mi"` + ConnectionPoolerDefaultCPULimit string `name:"connection_pooler_default_cpu_limit" default:"1"` + ConnectionPoolerDefaultMemoryLimit string `name:"connection_pooler_default_memory_limit" default:"100Mi"` } // Config describes operator config @@ -90,12 +145,15 @@ type Config struct { Auth Scalyr LogicalBackup + ConnectionPooler - WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' - EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"` - Sidecars map[string]string `name:"sidecar_docker_images"` - PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` + WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' + KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` + EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS + DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p2"` + SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers + SidecarContainers []v1.Container `name:"sidecars"` + PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` // value of this string must be valid JSON or YAML; see initPodServiceAccount PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` PodServiceAccountRoleDefinition string `name:"pod_service_account_role_definition" default:""` @@ -106,43 +164,53 @@ type Config struct { WALES3Bucket string `name:"wal_s3_bucket"` LogS3Bucket string `name:"log_s3_bucket"` KubeIAMRole string `name:"kube_iam_role"` + WALGSBucket string `name:"wal_gs_bucket"` + GCPCredentials string `name:"gcp_credentials"` AdditionalSecretMount string `name:"additional_secret_mount"` AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"` + EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"` + EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"` DebugLogging bool `name:"debug_logging" default:"true"` EnableDBAccess bool `name:"enable_database_access" default:"true"` EnableTeamsAPI bool 
`name:"enable_teams_api" default:"true"` EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` TeamAdminRole string `name:"team_admin_role" default:"admin"` EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"` + EnablePostgresTeamCRD bool `name:"enable_postgres_team_crd" default:"false"` + EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"` EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` CustomServiceAnnotations map[string]string `name:"custom_service_annotations"` CustomPodAnnotations map[string]string `name:"custom_pod_annotations"` EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"` PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"` - // deprecated and kept for backward compatibility - EnableLoadBalancer *bool `name:"enable_load_balancer"` - MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` - ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` - PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` - EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` - EnableInitContainers *bool `name:"enable_init_containers" default:"true"` - EnableSidecars *bool `name:"enable_sidecars" default:"true"` - Workers uint32 `name:"workers" default:"4"` - APIPort int `name:"api_port" default:"8080"` - RingLogLines int `name:"ring_log_lines" default:"100"` - ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"` - TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` - PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` - PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"` - ProtectedRoles []string `name:"protected_role_names" default:"admin"` - PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` - SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` + StorageResizeMode string `name:"storage_resize_mode" default:"pvc"` + EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility + ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"` + MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` + ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` + PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` + EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` + EnableInitContainers *bool `name:"enable_init_containers" default:"true"` + EnableSidecars *bool `name:"enable_sidecars" default:"true"` + Workers uint32 `name:"workers" default:"8"` + APIPort int `name:"api_port" default:"8080"` + RingLogLines int `name:"ring_log_lines" default:"100"` + ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"` + TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` + PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` + 
PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"` + ProtectedRoles []string `name:"protected_role_names" default:"admin"` + PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` + SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` + EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` + EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"` + EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"` } // MustMarshal marshals the config or panics func (c Config) MustMarshal() string { - b, err := json.MarshalIndent(c, "", "\t") + b, err := json.MarshalIndent(c, "", " ") if err != nil { panic(err) } @@ -197,5 +265,16 @@ func validate(cfg *Config) (err error) { if cfg.Workers == 0 { err = fmt.Errorf("number of workers should be higher than 0") } + + if *cfg.ConnectionPooler.NumberOfInstances < constants.ConnectionPoolerMinInstances { + msg := "number of connection pooler instances should be higher than %d" + err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances) + } + + if cfg.ConnectionPooler.User == cfg.SuperUsername { + msg := "Connection pool user is not allowed to be the same as super user, username: %s" + err = fmt.Errorf(msg, cfg.ConnectionPooler.User) + } + return } diff --git a/pkg/util/constants/annotations.go b/pkg/util/constants/annotations.go index 0b93fc2e1..fc5a84fa5 100644 --- a/pkg/util/constants/annotations.go +++ b/pkg/util/constants/annotations.go @@ -7,4 +7,5 @@ const ( ElbTimeoutAnnotationValue = "3600" KubeIAmAnnotation = "iam.amazonaws.com/role" VolumeStorateProvisionerAnnotation = "pv.kubernetes.io/provisioned-by" + PostgresqlControllerAnnotationKey = "acid.zalan.do/controller" ) diff --git a/pkg/util/constants/pooler.go b/pkg/util/constants/pooler.go new file mode 100644 index 000000000..ded795bbe --- /dev/null +++ b/pkg/util/constants/pooler.go @@ -0,0 +1,18 @@ +package constants + +// Connection pooler specific constants +const ( + ConnectionPoolerUserName = "pooler" + ConnectionPoolerSchemaName = "pooler" + ConnectionPoolerDefaultType = "pgbouncer" + ConnectionPoolerDefaultMode = "transaction" + ConnectionPoolerDefaultCpuRequest = "500m" + ConnectionPoolerDefaultCpuLimit = "1" + ConnectionPoolerDefaultMemoryRequest = "100Mi" + ConnectionPoolerDefaultMemoryLimit = "100Mi" + + ConnectionPoolerContainer = 0 + ConnectionPoolerMaxDBConnections = 60 + ConnectionPoolerMaxClientConnections = 10000 + ConnectionPoolerMinInstances = 1 +) diff --git a/pkg/util/constants/roles.go b/pkg/util/constants/roles.go index 2c20d69db..dd906fe80 100644 --- a/pkg/util/constants/roles.go +++ b/pkg/util/constants/roles.go @@ -2,15 +2,21 @@ package constants // Roles specific constants const ( - PasswordLength = 64 - SuperuserKeyName = "superuser" - ReplicationUserKeyName = "replication" - RoleFlagSuperuser = "SUPERUSER" - RoleFlagInherit = "INHERIT" - RoleFlagLogin = "LOGIN" - RoleFlagNoLogin = "NOLOGIN" - RoleFlagCreateRole = "CREATEROLE" - RoleFlagCreateDB = "CREATEDB" - RoleFlagReplication = "REPLICATION" - RoleFlagByPassRLS = "BYPASSRLS" + PasswordLength = 64 + SuperuserKeyName = "superuser" + ConnectionPoolerUserKeyName = "pooler" + ReplicationUserKeyName = "replication" + RoleFlagSuperuser = "SUPERUSER" + RoleFlagInherit = "INHERIT" + RoleFlagLogin = "LOGIN" + RoleFlagNoLogin = "NOLOGIN" + RoleFlagCreateRole = "CREATEROLE" + RoleFlagCreateDB = "CREATEDB" + RoleFlagReplication = "REPLICATION" + RoleFlagByPassRLS = 
"BYPASSRLS" + OwnerRoleNameSuffix = "_owner" + ReaderRoleNameSuffix = "_reader" + WriterRoleNameSuffix = "_writer" + UserRoleNameSuffix = "_user" + DefaultSearchPath = "\"$user\"" ) diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 94d1f5ac2..a21434d45 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -1,19 +1,28 @@ package k8sutil import ( + "context" "fmt" "reflect" b64 "encoding/base64" + "encoding/json" batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/spec" + apiappsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -21,11 +30,12 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - - acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +func Int32ToPointer(value int32) *int32 { + return &value +} + // KubernetesClient describes getters for Kubernetes objects type KubernetesClient struct { corev1.SecretsGetter @@ -38,12 +48,17 @@ type KubernetesClient struct { corev1.NodesGetter corev1.NamespacesGetter corev1.ServiceAccountsGetter + corev1.EventsGetter appsv1.StatefulSetsGetter + appsv1.DeploymentsGetter rbacv1.RolesGetter rbacv1.RoleBindingsGetter policyv1beta1.PodDisruptionBudgetsGetter - apiextbeta1.CustomResourceDefinitionsGetter + apiextv1.CustomResourceDefinitionsGetter clientbatchv1beta1.CronJobsGetter + acidv1.OperatorConfigurationsGetter + acidv1.PostgresTeamsGetter + acidv1.PostgresqlsGetter RESTClient rest.Interface AcidV1ClientSet *acidv1client.Clientset @@ -56,6 +71,34 @@ type mockSecret struct { type MockSecretGetter struct { } +type mockDeployment struct { + appsv1.DeploymentInterface +} + +type mockDeploymentNotExist struct { + appsv1.DeploymentInterface +} + +type MockDeploymentGetter struct { +} + +type MockDeploymentNotExistGetter struct { +} + +type mockService struct { + corev1.ServiceInterface +} + +type mockServiceNotExist struct { + corev1.ServiceInterface +} + +type MockServiceGetter struct { +} + +type MockServiceNotExistGetter struct { +} + type mockConfigMap struct { corev1.ConfigMapInterface } @@ -102,28 +145,65 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { kubeClient.NodesGetter = client.CoreV1() kubeClient.NamespacesGetter = client.CoreV1() kubeClient.StatefulSetsGetter = client.AppsV1() + kubeClient.DeploymentsGetter = client.AppsV1() kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1() kubeClient.RESTClient = client.CoreV1().RESTClient() kubeClient.RolesGetter = client.RbacV1() 
kubeClient.RoleBindingsGetter = client.RbacV1() kubeClient.CronJobsGetter = client.BatchV1beta1() + kubeClient.EventsGetter = client.CoreV1() apiextClient, err := apiextclient.NewForConfig(cfg) if err != nil { return kubeClient, fmt.Errorf("could not create api client:%v", err) } - kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1beta1() + kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1() + kubeClient.AcidV1ClientSet = acidv1client.NewForConfigOrDie(cfg) + if err != nil { + return kubeClient, fmt.Errorf("could not create acid.zalan.do clientset: %v", err) + } + + kubeClient.OperatorConfigurationsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresTeamsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresqlsGetter = kubeClient.AcidV1ClientSet.AcidV1() return kubeClient, nil } +// SetPostgresCRDStatus of Postgres cluster +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) { + var pg *apiacidv1.Postgresql + var pgStatus apiacidv1.PostgresStatus + pgStatus.PostgresClusterStatus = status + + patch, err := json.Marshal(struct { + PgStatus interface{} `json:"status"` + }{&pgStatus}) + + if err != nil { + return pg, fmt.Errorf("could not marshal status: %v", err) + } + + // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), + // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11) + // we should take advantage of it. + pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch( + context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status") + if err != nil { + return pg, fmt.Errorf("could not update status: %v", err) + } + + // update the spec, maintaining the new resourceVersion. + return pg, nil +} + // SameService compares the Services func SameService(cur, new *v1.Service) (match bool, reason string) { //TODO: improve comparison if cur.Spec.Type != new.Spec.Type { - return false, fmt.Sprintf("new service's type %q doesn't match the current one %q", + return false, fmt.Sprintf("new service's type %q does not match the current one %q", new.Spec.Type, cur.Spec.Type) } @@ -133,13 +213,13 @@ func SameService(cur, new *v1.Service) (match bool, reason string) { /* work around Kubernetes 1.6 serializing [] as nil. 
See https://github.com/kubernetes/kubernetes/issues/43203 */ if (len(oldSourceRanges) != 0) || (len(newSourceRanges) != 0) { if !reflect.DeepEqual(oldSourceRanges, newSourceRanges) { - return false, "new service's LoadBalancerSourceRange doesn't match the current one" + return false, "new service's LoadBalancerSourceRange does not match the current one" } } match = true - reasonPrefix := "new service's annotations doesn't match the current one:" + reasonPrefix := "new service's annotations does not match the current one:" for ann := range cur.Annotations { if _, ok := new.Annotations[ann]; !ok { match = false @@ -175,7 +255,7 @@ func SamePDB(cur, new *policybeta1.PodDisruptionBudget) (match bool, reason stri //TODO: improve comparison match = reflect.DeepEqual(new.Spec, cur.Spec) if !match { - reason = "new PDB spec doesn't match the current one" + reason = "new PDB spec does not match the current one" } return @@ -189,62 +269,230 @@ func getJobImage(cronJob *batchv1beta1.CronJob) string { func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason string) { if cur.Spec.Schedule != new.Spec.Schedule { - return false, fmt.Sprintf("new job's schedule %q doesn't match the current one %q", + return false, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule) } newImage := getJobImage(new) curImage := getJobImage(cur) if newImage != curImage { - return false, fmt.Sprintf("new job's image %q doesn't match the current one %q", + return false, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage) } return true, "" } -func (c *mockSecret) Get(name string, options metav1.GetOptions) (*v1.Secret, error) { - if name != "infrastructureroles-test" { - return nil, fmt.Errorf("NotFound") - } - secret := &v1.Secret{} - secret.Name = "testcluster" - secret.Data = map[string][]byte{ +func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { + oldFormatSecret := &v1.Secret{} + oldFormatSecret.Name = "testcluster" + oldFormatSecret.Data = map[string][]byte{ "user1": []byte("testrole"), "password1": []byte("testpassword"), "inrole1": []byte("testinrole"), "foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), } - return secret, nil + + newFormatSecret := &v1.Secret{} + newFormatSecret.Name = "test-secret-new-format" + newFormatSecret.Data = map[string][]byte{ + "user": []byte("new-test-role"), + "password": []byte("new-test-password"), + "inrole": []byte("new-test-inrole"), + "new-foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), + } + + secrets := map[string]*v1.Secret{ + "infrastructureroles-old-test": oldFormatSecret, + "infrastructureroles-new-test": newFormatSecret, + } + + for idx := 1; idx <= 2; idx++ { + newFormatStandaloneSecret := &v1.Secret{} + newFormatStandaloneSecret.Name = fmt.Sprintf("test-secret-new-format%d", idx) + newFormatStandaloneSecret.Data = map[string][]byte{ + "user": []byte(fmt.Sprintf("new-test-role%d", idx)), + "password": []byte(fmt.Sprintf("new-test-password%d", idx)), + "inrole": []byte(fmt.Sprintf("new-test-inrole%d", idx)), + } + + secrets[fmt.Sprintf("infrastructureroles-new-test%d", idx)] = + newFormatStandaloneSecret + } + + if secret, exists := secrets[name]; exists { + return secret, nil + } + + return nil, fmt.Errorf("NotFound") } -func (c *mockConfigMap) Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) { - if name != "infrastructureroles-test" { - return nil, 
fmt.Errorf("NotFound") - } - configmap := &v1.ConfigMap{} - configmap.Name = "testcluster" - configmap.Data = map[string]string{ +func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) { + oldFormatConfigmap := &v1.ConfigMap{} + oldFormatConfigmap.Name = "testcluster" + oldFormatConfigmap.Data = map[string]string{ "foobar": "{}", } - return configmap, nil + + newFormatConfigmap := &v1.ConfigMap{} + newFormatConfigmap.Name = "testcluster" + newFormatConfigmap.Data = map[string]string{ + "new-foobar": "{\"user_flags\": [\"createdb\"]}", + } + + configmaps := map[string]*v1.ConfigMap{ + "infrastructureroles-old-test": oldFormatConfigmap, + "infrastructureroles-new-test": newFormatConfigmap, + } + + if configmap, exists := configmaps[name]; exists { + return configmap, nil + } + + return nil, fmt.Errorf("NotFound") } // Secrets to be mocked -func (c *MockSecretGetter) Secrets(namespace string) corev1.SecretInterface { +func (mock *MockSecretGetter) Secrets(namespace string) corev1.SecretInterface { return &mockSecret{} } // ConfigMaps to be mocked -func (c *MockConfigMapsGetter) ConfigMaps(namespace string) corev1.ConfigMapInterface { +func (mock *MockConfigMapsGetter) ConfigMaps(namespace string) corev1.ConfigMapInterface { return &mockConfigMap{} } +func (mock *MockDeploymentGetter) Deployments(namespace string) appsv1.DeploymentInterface { + return &mockDeployment{} +} + +func (mock *MockDeploymentNotExistGetter) Deployments(namespace string) appsv1.DeploymentInterface { + return &mockDeploymentNotExist{} +} + +func (mock *mockDeployment) Create(context.Context, *apiappsv1.Deployment, metav1.CreateOptions) (*apiappsv1.Deployment, error) { + return &apiappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + }, + Spec: apiappsv1.DeploymentSpec{ + Replicas: Int32ToPointer(1), + }, + }, nil +} + +func (mock *mockDeployment) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return nil +} + +func (mock *mockDeployment) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) { + return &apiappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + }, + Spec: apiappsv1.DeploymentSpec{ + Replicas: Int32ToPointer(1), + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Image: "pooler:1.0", + }, + }, + }, + }, + }, + }, nil +} + +func (mock *mockDeployment) Patch(ctx context.Context, name string, t types.PatchType, data []byte, opts metav1.PatchOptions, subres ...string) (*apiappsv1.Deployment, error) { + return &apiappsv1.Deployment{ + Spec: apiappsv1.DeploymentSpec{ + Replicas: Int32ToPointer(2), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + }, + }, nil +} + +func (mock *mockDeploymentNotExist) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) { + return nil, &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } +} + +func (mock *mockDeploymentNotExist) Create(context.Context, *apiappsv1.Deployment, metav1.CreateOptions) (*apiappsv1.Deployment, error) { + return &apiappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + }, + Spec: apiappsv1.DeploymentSpec{ + Replicas: Int32ToPointer(1), + }, + }, nil +} + +func (mock *MockServiceGetter) Services(namespace string) corev1.ServiceInterface { + return &mockService{} +} + +func (mock 
*MockServiceNotExistGetter) Services(namespace string) corev1.ServiceInterface { + return &mockServiceNotExist{} +} + +func (mock *mockService) Create(context.Context, *v1.Service, metav1.CreateOptions) (*v1.Service, error) { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + }, + }, nil +} + +func (mock *mockService) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return nil +} + +func (mock *mockService) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + }, + }, nil +} + +func (mock *mockServiceNotExist) Create(context.Context, *v1.Service, metav1.CreateOptions) (*v1.Service, error) { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + }, + }, nil +} + +func (mock *mockServiceNotExist) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) { + return nil, &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonNotFound, + }, + } +} + // NewMockKubernetesClient for other tests func NewMockKubernetesClient() KubernetesClient { return KubernetesClient{ - SecretsGetter: &MockSecretGetter{}, - ConfigMapsGetter: &MockConfigMapsGetter{}, + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + DeploymentsGetter: &MockDeploymentGetter{}, + ServicesGetter: &MockServiceGetter{}, + } +} + +func ClientMissingObjects() KubernetesClient { + return KubernetesClient{ + DeploymentsGetter: &MockDeploymentNotExistGetter{}, + ServicesGetter: &MockServiceNotExistGetter{}, } } diff --git a/pkg/util/k8sutil/k8sutil_test.go b/pkg/util/k8sutil/k8sutil_test.go index 9b4f2eac3..b3e768501 100644 --- a/pkg/util/k8sutil/k8sutil_test.go +++ b/pkg/util/k8sutil/k8sutil_test.go @@ -63,7 +63,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's type "LoadBalancer" doesn't match the current one "ClusterIP"`, + reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`, }, { about: "services differ on lb source ranges", @@ -82,7 +82,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"185.249.56.0/22"}), match: false, - reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + reason: `new service's LoadBalancerSourceRange does not match the current one`, }, { about: "new service doesn't have lb source ranges", @@ -101,7 +101,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{}), match: false, - reason: `new service's LoadBalancerSourceRange doesn't match the current one`, + reason: `new service's LoadBalancerSourceRange does not match the current one`, }, { about: "services differ on DNS annotation", @@ -120,7 +120,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, + reason: `new service's annotations does not match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`, }, { about: "services differ on AWS ELB annotation", @@ -139,7 +139,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, 
[]string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, + reason: `new service's annotations does not match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`, }, { about: "service changes existing annotation", @@ -160,7 +160,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: 'foo' changed from 'bar' to 'baz'.`, + reason: `new service's annotations does not match the current one: 'foo' changed from 'bar' to 'baz'.`, }, { about: "service changes multiple existing annotations", @@ -184,7 +184,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one:`, + reason: `new service's annotations does not match the current one:`, }, { about: "service adds a new custom annotation", @@ -204,7 +204,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Added 'foo' with value 'bar'.`, + reason: `new service's annotations does not match the current one: Added 'foo' with value 'bar'.`, }, { about: "service removes a custom annotation", @@ -224,7 +224,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'.`, }, { about: "service removes a custom annotation and adds a new one", @@ -245,7 +245,7 @@ func TestSameService(t *testing.T) { v1.ServiceTypeLoadBalancer, []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, - reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`, }, { about: "service removes a custom annotation, adds a new one and change another", @@ -269,7 +269,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one: Removed 'foo'.`, + reason: `new service's annotations does not match the current one: Removed 'foo'.`, }, { about: "service add annotations", @@ -286,7 +286,7 @@ func TestSameService(t *testing.T) { []string{"128.141.0.0/16", "137.138.0.0/16"}), match: false, // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations doesn't match the current one: Added `, + reason: `new service's annotations does not match the current one: Added `, }, } for _, tt := range tests { diff --git a/pkg/util/nicediff/diff.go b/pkg/util/nicediff/diff.go new file mode 100644 index 000000000..e2793f2c7 --- /dev/null +++ b/pkg/util/nicediff/diff.go @@ -0,0 +1,191 @@ +// Copyright 2013 Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package nicediff + +import ( + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string, skipEqual bool) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + return Render(DiffChunks(aLines, bLines), skipEqual) +} + +// Render renders the slice of chunks into a representation that prefixes +// the lines with '+', '-', or ' ' depending on whether the line was added, +// removed, or equal (respectively). +func Render(chunks []Chunk, skipEqual bool) string { + buf := new(strings.Builder) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + if !skipEqual { + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. 
Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go index bdd96f048..53065e599 100644 --- a/pkg/util/patroni/patroni.go +++ b/pkg/util/patroni/patroni.go @@ -3,6 +3,7 @@ package patroni import ( "bytes" "encoding/json" + "errors" "fmt" "io/ioutil" "net" @@ -11,7 +12,7 @@ import ( "time" "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" ) const ( @@ -25,6 +26,7 @@ const ( type Interface interface { Switchover(master *v1.Pod, candidate string) error SetPostgresParameters(server *v1.Pod, options map[string]string) error + GetPatroniMemberState(pod *v1.Pod) (string, error) } // Patroni API client @@ -123,3 +125,36 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st } return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf) } + +//GetPatroniMemberState returns a state of member of a Patroni cluster +func (p *Patroni) GetPatroniMemberState(server *v1.Pod) (string, error) { + + apiURLString, err := apiURL(server) + if err != nil { + return "", err + } + response, err := p.httpClient.Get(apiURLString) + if err != nil { + return "", fmt.Errorf("could not perform Get request: %v", err) + } + defer response.Body.Close() + + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return "", 
fmt.Errorf("could not read response: %v", err) + } + + data := make(map[string]interface{}) + err = json.Unmarshal(body, &data) + if err != nil { + return "", err + } + + state, ok := data["state"].(string) + if !ok { + return "", errors.New("Patroni Get call response contains wrong type for 'state' field") + } + + return state, nil + +} diff --git a/pkg/util/retryutil/retry_util.go b/pkg/util/retryutil/retry_util.go index f8b61fc39..868ba6e98 100644 --- a/pkg/util/retryutil/retry_util.go +++ b/pkg/util/retryutil/retry_util.go @@ -27,7 +27,7 @@ func (t *Ticker) Tick() { <-t.ticker.C } func Retry(interval time.Duration, timeout time.Duration, f func() (bool, error)) error { //TODO: make the retry exponential if timeout < interval { - return fmt.Errorf("timout(%s) should be greater than interval(%v)", timeout, interval) + return fmt.Errorf("timeout(%s) should be greater than interval(%v)", timeout, interval) } tick := &Ticker{time.NewTicker(interval)} return RetryWorker(interval, timeout, tick, f) diff --git a/pkg/util/teams/teams_test.go b/pkg/util/teams/teams_test.go index 51bbcbc31..33d01b75b 100644 --- a/pkg/util/teams/teams_test.go +++ b/pkg/util/teams/teams_test.go @@ -133,11 +133,11 @@ var requestsURLtc = []struct { }{ { "coffee://localhost/", - fmt.Errorf(`Get coffee://localhost/teams/acid: unsupported protocol scheme "coffee"`), + fmt.Errorf(`Get "coffee://localhost/teams/acid": unsupported protocol scheme "coffee"`), }, { "http://192.168.0.%31/", - fmt.Errorf(`parse http://192.168.0.%%31/teams/acid: invalid URL escape "%%31"`), + fmt.Errorf(`parse "http://192.168.0.%%31/teams/acid": invalid URL escape "%%31"`), }, } diff --git a/pkg/util/users/users.go b/pkg/util/users/users.go index 112f89b43..5d97336e6 100644 --- a/pkg/util/users/users.go +++ b/pkg/util/users/users.go @@ -28,6 +28,7 @@ const ( // an existing roles of another role membership, nor it removes the already assigned flag // (except for the NOLOGIN). TODO: process other NOflags, i.e. NOSUPERUSER correctly. type DefaultUserSyncStrategy struct { + PasswordEncryption string } // ProduceSyncRequests figures out the types of changes that need to happen with the given users. @@ -45,7 +46,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM } } else { r := spec.PgSyncUserRequest{} - newMD5Password := util.PGUserPassword(newUser) + newMD5Password := util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(newUser) if dbUser.Password != newMD5Password { r.User.Password = newMD5Password @@ -73,36 +74,54 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM } // ExecuteSyncRequests makes actual database changes from the requests passed in its arguments. 
-func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(reqs []spec.PgSyncUserRequest, db *sql.DB) error { - for _, r := range reqs { - switch r.Kind { +func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(requests []spec.PgSyncUserRequest, db *sql.DB) error { + var reqretries []spec.PgSyncUserRequest + var errors []string + for _, request := range requests { + switch request.Kind { case spec.PGSyncUserAdd: - if err := strategy.createPgUser(r.User, db); err != nil { - return fmt.Errorf("could not create user %q: %v", r.User.Name, err) + if err := strategy.createPgUser(request.User, db); err != nil { + reqretries = append(reqretries, request) + errors = append(errors, fmt.Sprintf("could not create user %q: %v", request.User.Name, err)) } case spec.PGsyncUserAlter: - if err := strategy.alterPgUser(r.User, db); err != nil { - return fmt.Errorf("could not alter user %q: %v", r.User.Name, err) + if err := strategy.alterPgUser(request.User, db); err != nil { + reqretries = append(reqretries, request) + errors = append(errors, fmt.Sprintf("could not alter user %q: %v", request.User.Name, err)) } case spec.PGSyncAlterSet: - if err := strategy.alterPgUserSet(r.User, db); err != nil { - return fmt.Errorf("could not set custom user %q parameters: %v", r.User.Name, err) + if err := strategy.alterPgUserSet(request.User, db); err != nil { + reqretries = append(reqretries, request) + errors = append(errors, fmt.Sprintf("could not set custom user %q parameters: %v", request.User.Name, err)) } default: - return fmt.Errorf("unrecognized operation: %v", r.Kind) + return fmt.Errorf("unrecognized operation: %v", request.Kind) } } + + // creating roles might fail if group role members are created before the parent role + // retry adding roles as long as the number of failed attempts is shrinking + if len(reqretries) > 0 { + if len(reqretries) < len(requests) { + if err := strategy.ExecuteSyncRequests(reqretries, db); err != nil { + return err + } + } else { + return fmt.Errorf("could not execute sync requests for users: %v", errors) + } + } + return nil } -func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) (err error) { + +func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) error { queries := produceAlterRoleSetStmts(user) query := fmt.Sprintf(doBlockStmt, strings.Join(queries, ";")) - if _, err = db.Exec(query); err != nil { - err = fmt.Errorf("dB error: %v, query: %s", err, query) - return + if _, err := db.Exec(query); err != nil { + return fmt.Errorf("dB error: %v, query: %s", err, query) } - return + return nil } func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) error { @@ -122,7 +141,7 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D if user.Password == "" { userPassword = "PASSWORD NULL" } else { - userPassword = fmt.Sprintf(passwordTemplate, util.PGUserPassword(user)) + userPassword = fmt.Sprintf(passwordTemplate, util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(user)) } query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword) @@ -130,6 +149,12 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D return fmt.Errorf("dB error: %v, query: %s", err, query) } + if len(user.Parameters) > 0 { + if err := strategy.alterPgUserSet(user, db); err != nil { + return fmt.Errorf("incomplete setup for user %s: %v", user.Name, err) + } + } + return nil } @@ -137,7 +162,7 @@ func (strategy 
DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB var resultStmt []string if user.Password != "" || len(user.Flags) > 0 { - alterStmt := produceAlterStmt(user) + alterStmt := produceAlterStmt(user, strategy.PasswordEncryption) resultStmt = append(resultStmt, alterStmt) } if len(user.MemberOf) > 0 { @@ -156,14 +181,14 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB return nil } -func produceAlterStmt(user spec.PgUser) string { +func produceAlterStmt(user spec.PgUser, encryption string) string { // ALTER ROLE ... LOGIN ENCRYPTED PASSWORD .. result := make([]string, 0) password := user.Password flags := user.Flags if password != "" { - result = append(result, fmt.Sprintf(passwordTemplate, util.PGUserPassword(user))) + result = append(result, fmt.Sprintf(passwordTemplate, util.NewEncryptor(encryption).PGUserPassword(user))) } if len(flags) != 0 { result = append(result, strings.Join(flags, " ")) diff --git a/pkg/util/util.go b/pkg/util/util.go index ad6de14a2..bebb9f8da 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -1,11 +1,18 @@ package util import ( + "crypto/hmac" "crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords + cryptoRand "crypto/rand" + "crypto/sha256" + "encoding/base64" "encoding/hex" "fmt" + "math/big" "math/rand" + "reflect" "regexp" + "sort" "strings" "time" @@ -14,10 +21,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/zalando/postgres-operator/pkg/spec" + "golang.org/x/crypto/pbkdf2" ) const ( - md5prefix = "md5" + md5prefix = "md5" + scramsha256prefix = "SCRAM-SHA-256" + saltlength = 16 + iterations = 4096 ) var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") @@ -37,13 +48,17 @@ func False() *bool { return &b } -// RandomPassword generates random alphanumeric password of a given length. +// RandomPassword generates a secure, random alphanumeric password of a given length. func RandomPassword(n int) string { b := make([]byte, n) for i := range b { - b[i] = passwordChars[rand.Intn(len(passwordChars))] + maxN := big.NewInt(int64(len(passwordChars))) + if n, err := cryptoRand.Int(cryptoRand.Reader, maxN); err != nil { + panic(fmt.Errorf("Unable to generate secure, random password: %v", err)) + } else { + b[i] = passwordChars[n.Int64()] + } } - return string(b) } @@ -55,16 +70,62 @@ func NameFromMeta(meta metav1.ObjectMeta) spec.NamespacedName { } } -// PGUserPassword is used to generate md5 password hash for a given user. It does nothing for already hashed passwords. 
-func PGUserPassword(user spec.PgUser) string { - if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || user.Password == "" { +type Hasher func(user spec.PgUser) string +type Random func(n int) string + +type Encryptor struct { + encrypt Hasher + random Random +} + +func NewEncryptor(encryption string) *Encryptor { + e := Encryptor{random: RandomPassword} + m := map[string]Hasher{ + "md5": e.PGUserPasswordMD5, + "scram-sha-256": e.PGUserPasswordScramSHA256, + } + hasher, ok := m[encryption] + if !ok { + hasher = e.PGUserPasswordMD5 + } + e.encrypt = hasher + return &e +} + +func (e *Encryptor) PGUserPassword(user spec.PgUser) string { + if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || + (len(user.Password) > len(scramsha256prefix) && user.Password[:len(scramsha256prefix)] == scramsha256prefix) || user.Password == "" { // Avoid processing already encrypted or empty passwords return user.Password } + return e.encrypt(user) +} + +func (e *Encryptor) PGUserPasswordMD5(user spec.PgUser) string { s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords. return md5prefix + hex.EncodeToString(s[:]) } +func (e *Encryptor) PGUserPasswordScramSHA256(user spec.PgUser) string { + salt := []byte(e.random(saltlength)) + key := pbkdf2.Key([]byte(user.Password), salt, iterations, 32, sha256.New) + mac := hmac.New(sha256.New, key) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + mac = hmac.New(sha256.New, key) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + storedKey := sha256.Sum256(clientKey) + pass := fmt.Sprintf("%s$%v:%s$%s:%s", + scramsha256prefix, + iterations, + base64.StdEncoding.EncodeToString(salt), + base64.StdEncoding.EncodeToString(storedKey[:]), + base64.StdEncoding.EncodeToString(serverKey), + ) + return pass +} + // Diff returns diffs between 2 objects func Diff(a, b interface{}) []string { return pretty.Diff(a, b) @@ -75,6 +136,21 @@ func PrettyDiff(a, b interface{}) string { return strings.Join(Diff(a, b), "\n") } +// Compare two string slices while ignoring the order of elements +func IsEqualIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + a_copy := make([]string, len(a)) + b_copy := make([]string, len(b)) + copy(a_copy, a) + copy(b_copy, b) + sort.Strings(a_copy) + sort.Strings(b_copy) + + return reflect.DeepEqual(a_copy, b_copy) +} + // SubstractStringSlices finds elements in a that are not in b and return them as a result slice. func SubstractStringSlices(a []string, b []string) (result []string, equal bool) { // Slices are assumed to contain unique elements only @@ -117,6 +193,20 @@ func FindNamedStringSubmatch(r *regexp.Regexp, s string) map[string]string { return res } +// SliceContains +func SliceContains(slice interface{}, item interface{}) bool { + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + panic("Invalid data-type") + } + for i := 0; i < s.Len(); i++ { + if s.Index(i).Interface() == item { + return true + } + } + return false +} + // MapContains returns true if and only if haystack contains all the keys from the needle with matching corresponding values func MapContains(haystack, needle map[string]string) bool { if len(haystack) < len(needle) { @@ -141,6 +231,100 @@ func Coalesce(val, defaultVal string) string { return val } +// CoalesceStrArr returns the first argument if it is not null, otherwise the second one. 
+func CoalesceStrArr(val, defaultVal []string) []string { + if len(val) == 0 { + return defaultVal + } + return val +} + +// CoalesceStrMap returns the first argument if it is not null, otherwise the second one. +func CoalesceStrMap(val, defaultVal map[string]string) map[string]string { + if len(val) == 0 { + return defaultVal + } + return val +} + +// CoalesceInt works like coalesce but for int +func CoalesceInt(val, defaultVal int) int { + if val == 0 { + return defaultVal + } + return val +} + +// CoalesceInt32 works like coalesce but for *int32 +func CoalesceInt32(val, defaultVal *int32) *int32 { + if val == nil { + return defaultVal + } + return val +} + +// CoalesceUInt32 works like coalesce but for uint32 +func CoalesceUInt32(val, defaultVal uint32) uint32 { + if val == 0 { + return defaultVal + } + return val +} + +// CoalesceInt64 works like coalesce but for int64 +func CoalesceInt64(val, defaultVal int64) int64 { + if val == 0 { + return defaultVal + } + return val +} + +// CoalesceBool works like coalesce but for *bool +func CoalesceBool(val, defaultVal *bool) *bool { + if val == nil { + return defaultVal + } + return val +} + +// CoalesceDuration works like coalesce but for time.Duration +func CoalesceDuration(val time.Duration, defaultVal string) time.Duration { + if val == 0 { + duration, err := time.ParseDuration(defaultVal) + if err != nil { + panic(err) + } + return duration + } + return val +} + +// Test if any of the values is nil +func testNil(values ...*int32) bool { + for _, v := range values { + if v == nil { + return true + } + } + + return false +} + +// MaxInt32 : Return maximum of two integers provided via pointers. If one value +// is not defined, return the other one. If both are not defined, result is also +// undefined, caller needs to check for that. 
+func MaxInt32(a, b *int32) *int32 { + if testNil(a, b) { + return nil + } + + if *a > *b { + return a + } + + return b +} + // IsSmallerQuantity : checks if first resource is of a smaller quantity than the second func IsSmallerQuantity(requestStr, limitStr string) (bool, error) { diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 1f86ea1b4..c02d2c075 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -12,20 +12,27 @@ import ( ) var pgUsers = []struct { - in spec.PgUser - out string + in spec.PgUser + outmd5 string + outscramsha256 string }{{spec.PgUser{ Name: "test", Password: "password", Flags: []string{}, MemberOf: []string{}}, - "md587f77988ccb5aa917c93201ba314fcd4"}, + "md587f77988ccb5aa917c93201ba314fcd4", "SCRAM-SHA-256$4096:c2FsdA==$lF4cRm/Jky763CN4HtxdHnjV4Q8AWTNlKvGmEFFU8IQ=:ub8OgRsftnk2ccDMOt7ffHXNcikRkQkq1lh4xaAqrSw="}, {spec.PgUser{ Name: "test", Password: "md592f413f3974bdf3799bb6fecb5f9f2c6", Flags: []string{}, MemberOf: []string{}}, - "md592f413f3974bdf3799bb6fecb5f9f2c6"}} + "md592f413f3974bdf3799bb6fecb5f9f2c6", "md592f413f3974bdf3799bb6fecb5f9f2c6"}, + {spec.PgUser{ + Name: "test", + Password: "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", + Flags: []string{}, + MemberOf: []string{}}, + "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs="}} var prettyDiffTest = []struct { inA interface{} @@ -36,6 +43,17 @@ var prettyDiffTest = []struct { {[]int{1, 2, 3, 4}, []int{1, 2, 3, 4}, ""}, } +var isEqualIgnoreOrderTest = []struct { + inA []string + inB []string + outEqual bool +}{ + {[]string{"a", "b", "c"}, []string{"a", "b", "c"}, true}, + {[]string{"a", "b", "c"}, []string{"a", "c", "b"}, true}, + {[]string{"a", "b"}, []string{"a", "c", "b"}, false}, + {[]string{"a", "b", "c"}, []string{"a", "d", "c"}, false}, +} + var substractTest = []struct { inA []string inB []string @@ -46,6 +64,16 @@ var substractTest = []struct { {[]string{"a", "b", "c", "d"}, []string{"a", "bb", "c", "d"}, []string{"b"}, false}, } +var sliceContaintsTest = []struct { + slice []string + item string + out bool +}{ + {[]string{"a", "b", "c"}, "a", true}, + {[]string{"a", "b", "c"}, "d", false}, + {[]string{}, "d", false}, +} + var mapContaintsTest = []struct { inA map[string]string inB map[string]string @@ -107,9 +135,16 @@ func TestNameFromMeta(t *testing.T) { func TestPGUserPassword(t *testing.T) { for _, tt := range pgUsers { - pwd := PGUserPassword(tt.in) - if pwd != tt.out { - t.Errorf("PgUserPassword expected: %q, got: %q", tt.out, pwd) + e := NewEncryptor("md5") + pwd := e.PGUserPassword(tt.in) + if pwd != tt.outmd5 { + t.Errorf("PgUserPassword expected: %q, got: %q", tt.outmd5, pwd) + } + e = NewEncryptor("scram-sha-256") + e.random = func(n int) string { return "salt" } + pwd = e.PGUserPassword(tt.in) + if pwd != tt.outscramsha256 { + t.Errorf("PgUserPassword expected: %q, got: %q", tt.outscramsha256, pwd) } } } @@ -122,6 +157,15 @@ func TestPrettyDiff(t *testing.T) { } } +func TestIsEqualIgnoreOrder(t *testing.T) { + for _, tt := range isEqualIgnoreOrderTest { + actualEqual := IsEqualIgnoreOrder(tt.inA, tt.inB) + if actualEqual != tt.outEqual { + t.Errorf("IsEqualIgnoreOrder expected: %t, got: %t", tt.outEqual, actualEqual) + } + } +} + func TestSubstractSlices(t 
*testing.T) { for _, tt := range substractTest { actualRes, actualEqual := SubstractStringSlices(tt.inA, tt.inB) @@ -146,6 +190,15 @@ func TestFindNamedStringSubmatch(t *testing.T) { } } +func TestSliceContains(t *testing.T) { + for _, tt := range sliceContaintsTest { + res := SliceContains(tt.slice, tt.item) + if res != tt.out { + t.Errorf("SliceContains expected: %#v, got: %#v", tt.out, res) + } + } +} + func TestMapContains(t *testing.T) { for _, tt := range mapContaintsTest { res := MapContains(tt.inA, tt.inB) @@ -166,3 +219,13 @@ func TestIsSmallerQuantity(t *testing.T) { } } } + +/* +func TestNiceDiff(t *testing.T) { + o := "a\nb\nc\n" + n := "b\nd\n" + d := nicediff.Diff(o, n, true) + t.Log(d) + // t.Errorf("Lets see output") +} +*/ diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index 666436a06..8f998b4cb 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/retryutil" @@ -20,42 +20,80 @@ type EBSVolumeResizer struct { } // ConnectToProvider connects to AWS. -func (c *EBSVolumeResizer) ConnectToProvider() error { - sess, err := session.NewSession(&aws.Config{Region: aws.String(c.AWSRegion)}) +func (r *EBSVolumeResizer) ConnectToProvider() error { + sess, err := session.NewSession(&aws.Config{Region: aws.String(r.AWSRegion)}) if err != nil { return fmt.Errorf("could not establish AWS session: %v", err) } - c.connection = ec2.New(sess) + r.connection = ec2.New(sess) return nil } // IsConnectedToProvider checks if AWS connection is established. -func (c *EBSVolumeResizer) IsConnectedToProvider() bool { - return c.connection != nil +func (r *EBSVolumeResizer) IsConnectedToProvider() bool { + return r.connection != nil } // VolumeBelongsToProvider checks if the given persistent volume is backed by EBS. -func (c *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { +func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner } -// GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes -func (c *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { - volumeID := pv.Spec.AWSElasticBlockStore.VolumeID - if volumeID == "" { - return "", fmt.Errorf("volume id is empty for volume %q", pv.Name) - } +// ExtractVolumeID extracts volumeID +func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) { idx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1 if idx == 0 { - return "", fmt.Errorf("malfored EBS volume id %q", volumeID) + return "", fmt.Errorf("malformed EBS volume id %q", volumeID) } return volumeID[idx:], nil } +// GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes +func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { + volumeID := pv.Spec.AWSElasticBlockStore.VolumeID + if volumeID == "" { + return "", fmt.Errorf("got empty volume id for volume %v", pv) + } + + return r.ExtractVolumeID(volumeID) +} + +// DescribeVolumes ... 
+func (r *EBSVolumeResizer) DescribeVolumes(volumeIds []string) ([]VolumeProperties, error) { + if !r.IsConnectedToProvider() { + err := r.ConnectToProvider() + if err != nil { + return nil, err + } + } + + volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: aws.StringSlice((volumeIds))}) + if err != nil { + return nil, err + } + + p := []VolumeProperties{} + if nil == volumeOutput.Volumes { + return p, nil + } + + for _, v := range volumeOutput.Volumes { + if *v.VolumeType == "gp3" { + p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType, Iops: *v.Iops, Throughput: *v.Throughput}) + } else if *v.VolumeType == "gp2" { + p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType}) + } else { + return nil, fmt.Errorf("Discovered unexpected volume type %s %s", *v.VolumeId, *v.VolumeType) + } + } + + return p, nil +} + // ResizeVolume actually calls AWS API to resize the EBS volume if necessary. -func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { +func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { /* first check if the volume is already of a requested size */ - volumeOutput, err := c.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) + volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) if err != nil { return fmt.Errorf("could not get information about the volume: %v", err) } @@ -68,7 +106,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { return nil } input := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID} - output, err := c.connection.ModifyVolume(&input) + output, err := r.connection.ModifyVolume(&input) if err != nil { return fmt.Errorf("could not modify persistent volume: %v", err) } @@ -87,7 +125,45 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { in := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}} return retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout, func() (bool, error) { - out, err := c.connection.DescribeVolumesModifications(&in) + out, err := r.connection.DescribeVolumesModifications(&in) + if err != nil { + return false, fmt.Errorf("could not describe volume modification: %v", err) + } + if len(out.VolumesModifications) != 1 { + return false, fmt.Errorf("describe volume modification didn't return one record for volume %q", volumeID) + } + if *out.VolumesModifications[0].VolumeId != volumeID { + return false, fmt.Errorf("non-matching volume id when describing modifications: %q is different from %q", + *out.VolumesModifications[0].VolumeId, volumeID) + } + return *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil + }) +} + +// ModifyVolume Modify EBS volume +func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error { + /* first check if the volume is already of a requested size */ + input := ec2.ModifyVolumeInput{Size: newSize, VolumeId: &volumeID, VolumeType: newType, Iops: iops, Throughput: throughput} + output, err := r.connection.ModifyVolume(&input) + if err != nil { + return fmt.Errorf("could not modify persistent volume: %v", err) + } + + state := *output.VolumeModification.ModificationState + if state == constants.EBSVolumeStateFailed { + return 
fmt.Errorf("could not modify persistent volume %q: modification state failed", volumeID) + } + if state == "" { + return fmt.Errorf("received empty modification status") + } + if state == constants.EBSVolumeStateOptimizing || state == constants.EBSVolumeStateCompleted { + return nil + } + // wait until the volume reaches the "optimizing" or "completed" state + in := ec2.DescribeVolumesModificationsInput{VolumeIds: []*string{&volumeID}} + return retryutil.Retry(constants.EBSVolumeResizeWaitInterval, constants.EBSVolumeResizeWaitTimeout, + func() (bool, error) { + out, err := r.connection.DescribeVolumesModifications(&in) if err != nil { return false, fmt.Errorf("could not describe volume modification: %v", err) } @@ -103,7 +179,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { } // DisconnectFromProvider closes connection to the EC2 instance -func (c *EBSVolumeResizer) DisconnectFromProvider() error { - c.connection = nil +func (r *EBSVolumeResizer) DisconnectFromProvider() error { + r.connection = nil return nil } diff --git a/pkg/util/volumes/volumes.go b/pkg/util/volumes/volumes.go index 94c0fffc8..556729dc4 100644 --- a/pkg/util/volumes/volumes.go +++ b/pkg/util/volumes/volumes.go @@ -1,8 +1,17 @@ package volumes -import ( - "k8s.io/api/core/v1" -) +//go:generate mockgen -package mocks -destination=$PWD/mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor + +import v1 "k8s.io/api/core/v1" + +// VolumeProperties ... +type VolumeProperties struct { + VolumeID string + VolumeType string + Size int64 + Iops int64 + Throughput int64 +} // VolumeResizer defines the set of methods used to implememnt provider-specific resizing of persistent volumes. type VolumeResizer interface { @@ -10,6 +19,9 @@ type VolumeResizer interface { IsConnectedToProvider() bool VolumeBelongsToProvider(pv *v1.PersistentVolume) bool GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) + ExtractVolumeID(volumeID string) (string, error) ResizeVolume(providerVolumeID string, newSize int64) error + ModifyVolume(providerVolumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error DisconnectFromProvider() error + DescribeVolumes(providerVolumesID []string) ([]VolumeProperties, error) } diff --git a/ui/Dockerfile b/ui/Dockerfile index 3e1ae8756..ad775ece2 100644 --- a/ui/Dockerfile +++ b/ui/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.6 -MAINTAINER team-acid@zalando.de +FROM registry.opensource.zalan.do/library/alpine-3.12:latest +LABEL maintainer="Team ACID @ Zalando " -EXPOSE 8080 +EXPOSE 8081 RUN \ apk add --no-cache \ @@ -29,6 +29,7 @@ RUN \ /var/cache/apk/* COPY requirements.txt / +COPY start_server.sh / RUN pip3 install -r /requirements.txt COPY operator_ui /operator_ui @@ -37,4 +38,4 @@ ARG VERSION=dev RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" /operator_ui/__init__.py WORKDIR / -ENTRYPOINT ["/usr/bin/python3", "-m", "operator_ui"] +CMD ["/usr/bin/python3", "-m", "operator_ui"] diff --git a/ui/Makefile b/ui/Makefile index e7d5df674..29c8d9409 100644 --- a/ui/Makefile +++ b/ui/Makefile @@ -36,4 +36,4 @@ push: docker push "$(IMAGE):$(TAG)$(CDP_TAG)" mock: - docker run -it -p 8080:8080 "$(IMAGE):$(TAG)" --mock + docker run -it -p 8081:8081 "$(IMAGE):$(TAG)" --mock diff --git a/ui/app/src/edit.tag.pug b/ui/app/src/edit.tag.pug index 9029594bd..c1d94e589 100644 --- a/ui/app/src/edit.tag.pug +++ b/ui/app/src/edit.tag.pug @@ -137,6 +137,7 @@ edit o.spec.numberOfInstances = i.spec.numberOfInstances o.spec.enableMasterLoadBalancer = 
i.spec.enableMasterLoadBalancer || false o.spec.enableReplicaLoadBalancer = i.spec.enableReplicaLoadBalancer || false + o.spec.enableConnectionPooler = i.spec.enableConnectionPooler || false o.spec.volume = { size: i.spec.volume.size } if ('users' in i.spec && typeof i.spec.users === 'object') { diff --git a/ui/app/src/new.tag.pug b/ui/app/src/new.tag.pug index fe9d78226..6293a6c7a 100644 --- a/ui/app/src/new.tag.pug +++ b/ui/app/src/new.tag.pug @@ -239,6 +239,18 @@ new | | Enable replica ELB + tr + td Enable Connection Pool + td + label + input( + type='checkbox' + value='{ enableConnectionPooler }' + onchange='{ toggleEnableConnectionPooler }' + ) + | + | Enable Connection Pool (using PGBouncer) + tr td Volume size td @@ -493,6 +505,9 @@ new {{#if enableReplicaLoadBalancer}} enableReplicaLoadBalancer: true {{/if}} + {{#if enableConnectionPooler}} + enableConnectionPooler: true + {{/if}} volume: size: "{{ volumeSize }}Gi" {{#if users}} @@ -516,13 +531,14 @@ new - {{ odd }}/32 {{/if}} + {{#if resourcesVisible}} resources: requests: cpu: {{ cpu.state.request.state }}m memory: {{ memory.state.request.state }}Mi limits: cpu: {{ cpu.state.limit.state }}m - memory: {{ memory.state.limit.state }}Mi{{#if restoring}} + memory: {{ memory.state.limit.state }}Mi{{/if}}{{#if restoring}} clone: cluster: "{{ backup.state.name.state }}" @@ -542,6 +558,7 @@ new instanceCount: this.instanceCount, enableMasterLoadBalancer: this.enableMasterLoadBalancer, enableReplicaLoadBalancer: this.enableReplicaLoadBalancer, + enableConnectionPooler: this.enableConnectionPooler, volumeSize: this.volumeSize, users: this.users.valids, databases: this.databases.valids, @@ -552,6 +569,7 @@ new memory: this.memory, backup: this.backup, namespace: this.namespace, + resourcesVisible: this.config.resources_visible, restoring: this.backup.state.type.state !== 'empty', pitr: this.backup.state.type.state === 'pitr', } @@ -598,6 +616,10 @@ new this.enableReplicaLoadBalancer = !this.enableReplicaLoadBalancer } + this.toggleEnableConnectionPooler = e => { + this.enableConnectionPooler = !this.enableConnectionPooler + } + this.volumeChange = e => { this.volumeSize = +e.target.value } @@ -892,6 +914,7 @@ new this.odd = '' this.enableMasterLoadBalancer = false this.enableReplicaLoadBalancer = false + this.enableConnectionPooler = false this.postgresqlVersion = this.postgresqlVersion = ( this.config.postgresql_versions[0] diff --git a/ui/app/src/postgresql.tag.pug b/ui/app/src/postgresql.tag.pug index be7173dbe..c557e4da8 100644 --- a/ui/app/src/postgresql.tag.pug +++ b/ui/app/src/postgresql.tag.pug @@ -74,11 +74,13 @@ postgresql .alert.alert-info(if='{ !progress.requestStatus }') PostgreSQL cluster requested .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed - .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() }) + .alert.alert-success(if='{ progress.requestStatus === "OK" }') Manifest creation successful ({ new Date(progress.createdTimestamp).toLocaleString() }) .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created + .alert.alert-danger(if='{progress.status && progress.status.PostgresClusterStatus == "CreateFailed"}') Cluster creation failed: Check events and cluster name! 
+ .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created @@ -92,6 +94,8 @@ postgresql .alert.alert-success(if='{ progress.masterLabel }') PostgreSQL master available, label is attached .alert.alert-success(if='{ progress.masterLabel && progress.dnsName }') PostgreSQL ready: { progress.dnsName } + .alert.alert-success(if='{ progress.pooler }') Connection pooler deployment created + .col-lg-3 help-general(config='{ opts.config }') @@ -122,9 +126,13 @@ postgresql jQuery.get( '/postgresqls/' + this.cluster_path, ).done(data => { + this.progress.pooler = false this.progress.postgresql = true this.progress.postgresqlManifest = data + // copy status as we delete later for edit + this.progress.status = data.status this.progress.createdTimestamp = data.metadata.creationTimestamp + this.progress.poolerEnabled = data.spec.enableConnectionPooler this.uid = this.progress.postgresqlManifest.metadata.uid this.update() @@ -160,6 +168,11 @@ postgresql this.progress.dnsName = data.metadata.name + '.' + data.metadata.namespace } + jQuery.get('/pooler/' + this.cluster_path).done(data => { + this.progress.pooler = {"url": ""} + this.update() + }) + this.update() }) }) @@ -194,6 +207,7 @@ postgresql delete manifest.metadata.annotations[last_applied] } + delete manifest.metadata.managedFields delete manifest.metadata.creationTimestamp delete manifest.metadata.deletionGracePeriodSeconds delete manifest.metadata.deletionTimestamp diff --git a/ui/app/src/postgresqls.tag.pug b/ui/app/src/postgresqls.tag.pug index 250c175ec..38e5fcd9d 100644 --- a/ui/app/src/postgresqls.tag.pug +++ b/ui/app/src/postgresqls.tag.pug @@ -63,10 +63,8 @@ postgresqls td(style='white-space: pre') | { namespace } td - a( - href='/#/status/{ cluster_path(this) }' - ) - | { name } + a(href='/#/status/{ cluster_path(this) }') { name } + btn.btn-danger(if='{status.PostgresClusterStatus == "CreateFailed"}') Create Failed td { nodes } td { cpu } / { cpu_limit } td { memory } / { memory_limit } @@ -230,7 +228,7 @@ postgresqls ) const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => { - costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs) + costs = Math.max(nodes, opts.config.min_pods) * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs) return costs.toFixed(2) } diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml index 477e4d655..51bb394b3 100644 --- a/ui/manifests/deployment.yaml +++ b/ui/manifests/deployment.yaml @@ -20,7 +20,7 @@ spec: serviceAccountName: postgres-operator-ui containers: - name: "service" - image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 + image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.0 ports: - containerPort: 8081 protocol: "TCP" @@ -44,6 +44,8 @@ spec: value: "http://postgres-operator:8080" - name: "OPERATOR_CLUSTER_NAME_LABEL" value: "cluster-name" + - name: "RESOURCES_VISIBLE" + value: "False" - name: "TARGET_NAMESPACE" value: "default" - name: "TEAMS" @@ -66,10 +68,8 @@ spec: "cost_core": 0.0575, "cost_memory": 0.014375, "postgresql_versions": [ + "13", "12", - "11", - "10", - "9.6", - "9.5" + "11" ] } diff --git a/ui/manifests/kustomization.yaml b/ui/manifests/kustomization.yaml new file mode 100644 index 000000000..5803f854e --- /dev/null +++ b/ui/manifests/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- deployment.yaml +- ingress.yaml +- service.yaml +- ui-service-account-rbac.yaml diff --git a/ui/manifests/service.yaml b/ui/manifests/service.yaml index 989ec041e..7b820ca92 100644 --- a/ui/manifests/service.yaml +++ b/ui/manifests/service.yaml @@ -8,7 +8,7 @@ metadata: spec: type: "ClusterIP" selector: - application: "postgres-operator-ui" + name: "postgres-operator-ui" ports: - port: 80 protocol: "TCP" diff --git a/ui/manifests/ui-service-account-rbac.yaml b/ui/manifests/ui-service-account-rbac.yaml index 2e09797a0..d4937b5a2 100644 --- a/ui/manifests/ui-service-account-rbac.yaml +++ b/ui/manifests/ui-service-account-rbac.yaml @@ -39,6 +39,7 @@ rules: - apiGroups: - apps resources: + - deployments - statefulsets verbs: - get diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index 5a3054f0e..5fbb6d24e 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -7,6 +7,7 @@ gevent.monkey.patch_all() import requests import tokens +import sys from backoff import expo, on_exception from click import ParamType, command, echo, option @@ -25,7 +26,7 @@ from flask import ( from flask_oauthlib.client import OAuth from functools import wraps from gevent import sleep, spawn -from gevent.wsgi import WSGIServer +from gevent.pywsgi import WSGIServer from jq import jq from json import dumps, loads from logging import DEBUG, ERROR, INFO, basicConfig, exception, getLogger @@ -44,6 +45,7 @@ from .spiloutils import ( create_postgresql, read_basebackups, read_namespaces, + read_pooler, read_pods, read_postgresql, read_postgresqls, @@ -80,10 +82,12 @@ OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-nam OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}') OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}') READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true'] +RESOURCES_VISIBLE = getenv('RESOURCES_VISIBLE', True) SPILO_S3_BACKUP_PREFIX = getenv('SPILO_S3_BACKUP_PREFIX', 'spilo/') SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid') TARGET_NAMESPACE = getenv('TARGET_NAMESPACE') GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False) +MIN_PODS= getenv('MIN_PODS', 2) # storage pricing, i.e. 
https://aws.amazon.com/ebs/pricing/ COST_EBS = float(getenv('COST_EBS', 0.119)) # GB per month @@ -101,6 +105,8 @@ USE_AWS_INSTANCE_PROFILE = ( getenv('USE_AWS_INSTANCE_PROFILE', 'false').lower() != 'false' ) +AWS_ENDPOINT = getenv('AWS_ENDPOINT') + tokens.configure() tokens.manage('read-only') tokens.start() @@ -297,13 +303,14 @@ DEFAULT_UI_CONFIG = { 'users_visible': True, 'databases_visible': True, 'resources_visible': True, - 'postgresql_versions': ['9.6', '10', '11'], + 'postgresql_versions': ['11','12','13'], 'dns_format_string': '{0}.{1}.{2}', 'pgui_link': '', 'static_network_whitelist': {}, 'cost_ebs': COST_EBS, 'cost_core': COST_CORE, - 'cost_memory': COST_MEMORY + 'cost_memory': COST_MEMORY, + 'min_pods': MIN_PODS } @@ -312,8 +319,10 @@ DEFAULT_UI_CONFIG = { def get_config(): config = loads(OPERATOR_UI_CONFIG) or DEFAULT_UI_CONFIG config['read_only_mode'] = READ_ONLY_MODE + config['resources_visible'] = RESOURCES_VISIBLE config['superuser_team'] = SUPERUSER_TEAM config['target_namespace'] = TARGET_NAMESPACE + config['min_pods'] = MIN_PODS config['namespaces'] = ( [TARGET_NAMESPACE] @@ -397,6 +406,22 @@ def get_service(namespace: str, cluster: str): ) +@app.route('/pooler//') +@authorize +def get_list_poolers(namespace: str, cluster: str): + + if TARGET_NAMESPACE not in ['', '*', namespace]: + return wrong_namespace() + + return respond( + read_pooler( + get_cluster(), + namespace, + "{}-pooler".format(cluster), + ), + ) + + @app.route('/statefulsets//') @authorize def get_list_clusters(namespace: str, cluster: str): @@ -471,6 +496,7 @@ def get_postgresqls(): 'uid': uid, 'namespaced_name': namespace + '/' + name, 'full_name': namespace + '/' + name + ('/' + uid if uid else ''), + 'status': status, } for cluster in these( read_postgresqls( @@ -484,6 +510,7 @@ def get_postgresqls(): 'items', ) for spec in [cluster.get('spec', {}) if cluster.get('spec', {}) is not None else {"error": "Invalid spec in manifest"}] + for status in [cluster.get('status', {})] for metadata in [cluster['metadata']] for namespace in [metadata['namespace']] for name in [metadata['name']] @@ -587,6 +614,28 @@ def update_postgresql(namespace: str, cluster: str): spec['volume'] = {'size': size} + if 'enableConnectionPooler' in postgresql['spec']: + cp = postgresql['spec']['enableConnectionPooler'] + if not cp: + if 'enableConnectionPooler' in o['spec']: + del o['spec']['enableConnectionPooler'] + else: + spec['enableConnectionPooler'] = True + else: + if 'enableConnectionPooler' in o['spec']: + del o['spec']['enableConnectionPooler'] + + if 'enableReplicaConnectionPooler' in postgresql['spec']: + cp = postgresql['spec']['enableReplicaConnectionPooler'] + if not cp: + if 'enableReplicaConnectionPooler' in o['spec']: + del o['spec']['enableReplicaConnectionPooler'] + else: + spec['enableReplicaConnectionPooler'] = True + else: + if 'enableReplicaConnectionPooler' in o['spec']: + del o['spec']['enableReplicaConnectionPooler'] + if 'enableReplicaLoadBalancer' in postgresql['spec']: rlb = postgresql['spec']['enableReplicaLoadBalancer'] if not rlb: @@ -1006,7 +1055,7 @@ def init_cluster(): def main(port, secret_key, debug, clusters: list): global TARGET_NAMESPACE - basicConfig(level=DEBUG if debug else INFO) + basicConfig(stream=sys.stdout, level=(DEBUG if debug else INFO), format='%(asctime)s %(levelname)s: %(message)s',) init_cluster() @@ -1024,6 +1073,7 @@ def main(port, secret_key, debug, clusters: list): logger.info(f'Tokeninfo URL: {TOKENINFO_URL}') logger.info(f'Use AWS instance_profile: 
{USE_AWS_INSTANCE_PROFILE}') logger.info(f'WAL-E S3 endpoint: {WALE_S3_ENDPOINT}') + logger.info(f'AWS S3 endpoint: {AWS_ENDPOINT}') if TARGET_NAMESPACE is None: @on_exception( diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py index 33d07d88a..26113bd54 100644 --- a/ui/operator_ui/spiloutils.py +++ b/ui/operator_ui/spiloutils.py @@ -1,7 +1,7 @@ from boto3 import client from datetime import datetime, timezone from furl import furl -from json import dumps +from json import dumps, loads from logging import getLogger from os import environ, getenv from requests import Session @@ -16,8 +16,19 @@ logger = getLogger(__name__) session = Session() +AWS_ENDPOINT = getenv('AWS_ENDPOINT') + OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name') +COMMON_CLUSTER_LABEL = getenv('COMMON_CLUSTER_LABEL', '{"application":"spilo"}') +COMMON_POOLER_LABEL = getenv('COMMON_POOLER_LABEL', '{"application":"db-connection-pooler"}') + +logger.info("Common Cluster Label: {}".format(COMMON_CLUSTER_LABEL)) +logger.info("Common Pooler Label: {}".format(COMMON_POOLER_LABEL)) + +COMMON_CLUSTER_LABEL = loads(COMMON_CLUSTER_LABEL) +COMMON_POOLER_LABEL = loads(COMMON_POOLER_LABEL) + def request(cluster, path, **kwargs): if 'timeout' not in kwargs: @@ -85,6 +96,7 @@ def resource_api_version(resource_type): return { 'postgresqls': 'apis/acid.zalan.do/v1', 'statefulsets': 'apis/apps/v1', + 'deployments': 'apis/apps/v1', }.get(resource_type, 'api/v1') @@ -95,6 +107,12 @@ def encode_labels(label_selector): ]) +def cluster_labels(spilo_cluster): + labels = COMMON_CLUSTER_LABEL + labels[OPERATOR_CLUSTER_NAME_LABEL] = spilo_cluster + return labels + + def kubernetes_url( resource_type, namespace='default', @@ -139,7 +157,7 @@ def read_pods(cluster, namespace, spilo_cluster): cluster=cluster, resource_type='pods', namespace=namespace, - label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster}, + label_selector=cluster_labels(spilo_cluster), ) @@ -149,7 +167,7 @@ def read_pod(cluster, namespace, resource_name): resource_type='pods', namespace=namespace, resource_name=resource_name, - label_selector={'application': 'spilo'}, + label_selector=COMMON_CLUSTER_LABEL, ) @@ -159,7 +177,17 @@ def read_service(cluster, namespace, resource_name): resource_type='services', namespace=namespace, resource_name=resource_name, - label_selector={'application': 'spilo'}, + label_selector=COMMON_CLUSTER_LABEL, + ) + + +def read_pooler(cluster, namespace, resource_name): + return kubernetes_get( + cluster=cluster, + resource_type='deployments', + namespace=namespace, + resource_name=resource_name, + label_selector=COMMON_POOLER_LABEL, ) @@ -169,7 +197,7 @@ def read_statefulset(cluster, namespace, resource_name): resource_type='statefulsets', namespace=namespace, resource_name=resource_name, - label_selector={'application': 'spilo'}, + label_selector=COMMON_CLUSTER_LABEL, ) @@ -246,7 +274,7 @@ def read_stored_clusters(bucket, prefix, delimiter='/'): return [ prefix['Prefix'].split('/')[-2] for prefix in these( - client('s3').list_objects( + client('s3', endpoint_url=AWS_ENDPOINT).list_objects( Bucket=bucket, Delimiter=delimiter, Prefix=prefix, @@ -267,7 +295,7 @@ def read_versions( return [ 'base' if uid == 'wal' else uid for prefix in these( - client('s3').list_objects( + client('s3', endpoint_url=AWS_ENDPOINT).list_objects( Bucket=bucket, Delimiter=delimiter, Prefix=prefix + pg_cluster + delimiter, @@ -280,6 +308,7 @@ def read_versions( if uid == 'wal' or defaulting(lambda: UUID(uid)) ] 
+BACKUP_VERSION_PREFIXES = ['','9.5/', '9.6/', '10/','11/', '12/', '13/'] def read_basebackups( pg_cluster, @@ -292,18 +321,24 @@ def read_basebackups( ): environ['WALE_S3_ENDPOINT'] = s3_endpoint suffix = '' if uid == 'base' else '/' + uid - return [ - { - key: value - for key, value in basebackup.__dict__.items() - if isinstance(value, str) or isinstance(value, int) - } - for basebackup in Attrs.call( - f=configure_backup_cxt, - aws_instance_profile=use_aws_instance_profile, - s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/', - )._backup_list(detail=True) - ] + backups = [] + + for vp in BACKUP_VERSION_PREFIXES: + + backups = backups + [ + { + key: value + for key, value in basebackup.__dict__.items() + if isinstance(value, str) or isinstance(value, int) + } + for basebackup in Attrs.call( + f=configure_backup_cxt, + aws_instance_profile=use_aws_instance_profile, + s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}', + )._backup_list(detail=True) + ] + + return backups def parse_time(s: str): diff --git a/ui/requirements.txt b/ui/requirements.txt index 5d987416c..8f612d554 100644 --- a/ui/requirements.txt +++ b/ui/requirements.txt @@ -1,15 +1,15 @@ Flask-OAuthlib==0.9.5 -Flask==1.1.1 -backoff==1.8.1 -boto3==1.10.4 +Flask==1.1.2 +backoff==1.10.0 +boto3==1.16.52 boto==2.49.0 -click==6.7 -furl==1.0.2 -gevent==1.2.2 -jq==0.1.6 +click==7.1.2 +furl==2.1.0 +gevent==20.12.1 +jq==1.1.1 json_delta>=2.0 kubernetes==3.0.0 -requests==2.22.0 +requests==2.25.1 stups-tokens>=1.1.19 -wal_e==1.1.0 +wal_e==1.1.1 werkzeug==0.16.1 diff --git a/ui/run_local.sh b/ui/run_local.sh index e331b2414..79723680a 100755 --- a/ui/run_local.sh +++ b/ui/run_local.sh @@ -23,11 +23,9 @@ default_operator_ui_config='{ "cost_core": 0.0575, "cost_memory": 0.014375, "postgresql_versions": [ + "13", "12", - "11", - "10", - "9.6", - "9.5" + "11" ], "static_network_whitelist": { "localhost": ["172.0.0.1/32"] diff --git a/ui/start_server.sh b/ui/start_server.sh new file mode 100644 index 000000000..e2c3980cc --- /dev/null +++ b/ui/start_server.sh @@ -0,0 +1,2 @@ +#!/bin/bash +/usr/bin/python3 -m operator_ui
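
A minimal usage sketch of the password Encryptor introduced in pkg/util/util.go above (illustrative only, not part of the patch; the user names and plaintext password are made up): NewEncryptor picks the hasher from the given encryption name and falls back to md5 for unknown values, while PGUserPassword passes empty or already-hashed passwords through unchanged.

// Hypothetical caller of the new Encryptor API from pkg/util.
package main

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util"
)

func main() {
	user := spec.PgUser{Name: "app_user", Password: "example-password"}

	// "md5" yields the classic md5<hex> hash of password+username.
	fmt.Println(util.NewEncryptor("md5").PGUserPassword(user))

	// "scram-sha-256" yields a SCRAM-SHA-256 verifier; the salt is random,
	// so the output differs between runs.
	fmt.Println(util.NewEncryptor("scram-sha-256").PGUserPassword(user))

	// Already-hashed passwords are detected and returned unchanged,
	// regardless of the configured encryption (value taken from util_test.go).
	hashed := spec.PgUser{Name: "app_user", Password: "md592f413f3974bdf3799bb6fecb5f9f2c6"}
	fmt.Println(util.NewEncryptor("scram-sha-256").PGUserPassword(hashed))
}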