Merge branch 'master' into gh-pages

Felix Kunde 2020-12-30 16:30:37 +01:00
commit bef5dbcd94
153 changed files with 15819 additions and 5208 deletions


@@ -0,0 +1,19 @@
---
name: Postgres Operator issue template
about: How are you using the operator?
title: ''
labels: ''
assignees: ''
---
Please answer these short questions to help us understand your problem or question better:
- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.0
- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
- **Are you running Postgres Operator in production?** [yes | no]
- **Type of issue?** [Bug report, question, feature request, etc.]
Some general remarks when posting a bug report:
- Please check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines, please put them in a separate GitHub gist together with your Postgres CRD and configuration manifest.
- If you feel this issue might be more related to the [Spilo](https://github.com/zalando/spilo/issues) docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening issues in the respective repos.


@@ -0,0 +1,18 @@
## Problem description
## Linked issues
## Checklist
Thanks for submitting a pull request to the Postgres Operator project.
Please ensure your contribution matches the following items:
- [ ] Your go code is [formatted](https://blog.golang.org/gofmt). Your IDE should do it automatically for you.
- [ ] You have updated [generated code](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#code-generation) when introducing new fields to the `acid.zalan.do` api package.
- [ ] New [configuration options](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#introduce-additional-configuration-parameters) are reflected in CRD validation, helm charts and sample manifests.
- [ ] New functionality is covered by [unit](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#unit-tests) and/or [e2e](https://github.com/zalando/postgres-operator/blob/master/docs/developer.md#end-to-end-tests) tests.
- [ ] You have checked existing open PRs for possible overlap and referenced them.

.github/workflows/run_e2e.yaml (new file)

@@ -0,0 +1,25 @@
name: operator-e2e-tests
on:
  pull_request:
  push:
    branches:
    - master

jobs:
  tests:
    name: End-2-End tests
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v1
    - uses: actions/setup-go@v2
      with:
        go-version: "^1.15.6"
    - name: Make dependencies
      run: make deps mocks
    - name: Compile
      run: make linux
    - name: Run unit tests
      run: go test ./...
    - name: Run end-2-end tests
      run: make e2e

.github/workflows/run_tests.yaml (new file)

@@ -0,0 +1,30 @@
name: operator-tests
on:
  pull_request:
  push:
    branches:
    - master

jobs:
  tests:
    name: Unit tests and coverage
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v1
    - uses: actions/setup-go@v2
      with:
        go-version: "^1.15.6"
    - name: Make dependencies
      run: make deps mocks
    - name: Compile
      run: make linux
    - name: Run unit tests
      run: go test -race -covermode atomic -coverprofile=coverage.out ./...
    - name: Convert coverage to lcov
      uses: jandelgado/gcov2lcov-action@v1.0.5
    - name: Coveralls
      uses: coverallsapp/github-action@master
      with:
        github-token: ${{ secrets.GITHUB_TOKEN }}
        path-to-lcov: coverage.lcov

.gitignore

@@ -30,6 +30,7 @@ _testmain.go
 /docker/build/
 /github.com/
 .idea
+.vscode
 scm-source.json
@@ -47,6 +48,8 @@ __pycache__/
 # Distribution / packaging
 .Python
+ui/app/node_modules
+ui/operator_ui/static/build
 build/
 develop-eggs/
 dist/
@@ -95,3 +98,5 @@ e2e/manifests
 # Translations
 *.mo
 *.pot
+mocks

.travis.yml (deleted)

@@ -1,22 +0,0 @@
dist: trusty
sudo: false
branches:
  only:
  - master
language: go
go:
- "1.14.x"
before_install:
- go get github.com/mattn/goveralls
install:
- make deps
script:
- hack/verify-codegen.sh
- travis_wait 20 goveralls -service=travis-ci -package ./pkg/... -v
- make e2e

CODEOWNERS

@@ -1,2 +1,2 @@
 # global owners
-* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
+* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih

MAINTAINERS

@@ -1,3 +1,5 @@
-Oleksii Kliukin <oleksii.kliukin@zalando.de>
 Dmitrii Dolgov <dmitrii.dolgov@zalando.de>
 Sergey Dudoladov <sergey.dudoladov@zalando.de>
+Felix Kunde <felix.kunde@zalando.de>
+Jan Mussler <jan.mussler@zalando.de>
+Rafia Sabih <rafia.sabih@zalando.de>

Makefile

@@ -1,4 +1,4 @@
-.PHONY: clean local test linux macos docker push scm-source.json e2e
+.PHONY: clean local test linux macos mocks docker push scm-source.json e2e

 BINARY ?= postgres-operator
 BUILD_FLAGS ?= -v
@@ -24,12 +24,16 @@ PKG := `go list ./... | grep -v /vendor/`
 ifeq ($(DEBUG),1)
 	DOCKERFILE = DebugDockerfile
-	DEBUG_POSTFIX := -debug
+	DEBUG_POSTFIX := -debug-$(shell date hhmmss)
 	BUILD_FLAGS += -gcflags "-N -l"
 else
 	DOCKERFILE = Dockerfile
 endif

+ifeq ($(FRESH),1)
+	DEBUG_FRESH=$(shell date +"%H-%M-%S")
+endif
+
 ifdef CDP_PULL_REQUEST_NUMBER
 	CDP_TAG := -${CDP_BUILD_VERSION}
 endif
@@ -66,7 +70,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker-context
 	echo "Version ${VERSION}"
 	echo "CDP tag ${CDP_TAG}"
 	echo "git describe $(shell git describe --tags --always --dirty)"
-	cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .
+	cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .

 indocker-race:
 	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux"
@@ -77,9 +81,12 @@ push:

 scm-source.json: .git
 	echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json

+mocks:
+	GO111MODULE=on go generate ./...
+
 tools:
-	GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck
-	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.3
+	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.19.3
+	GO111MODULE=on go get github.com/golang/mock/mockgen@v1.4.4
 	GO111MODULE=on go mod tidy

 fmt:
@@ -97,4 +104,4 @@ test:
 	GO111MODULE=on go test ./...

 e2e: docker # build operator image to be tested
-	cd e2e; make tools e2etest clean
+	cd e2e; make e2etest

README.md

@@ -1,32 +1,34 @@
 # Postgres Operator

-[![Build Status](https://travis-ci.org/zalando/postgres-operator.svg?branch=master)](https://travis-ci.org/zalando/postgres-operator)
-[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg)](https://coveralls.io/github/zalando/postgres-operator)
-[![Go Report Card](https://goreportcard.com/badge/github.com/zalando/postgres-operator)](https://goreportcard.com/report/github.com/zalando/postgres-operator)
-[![GoDoc](https://godoc.org/github.com/zalando/postgres-operator?status.svg)](https://godoc.org/github.com/zalando/postgres-operator)
-[![golangci](https://golangci.com/badges/github.com/zalando/postgres-operator.svg)](https://golangci.com/r/github.com/zalando/postgres-operator)
+![Tests](https://github.com/zalando/postgres-operator/workflows/operator-tests/badge.svg)
+![E2E Tests](https://github.com/zalando/postgres-operator/workflows/operator-e2e-tests/badge.svg)
+[![Coverage Status](https://coveralls.io/repos/github/zalando/postgres-operator/badge.svg?branch=master)](https://coveralls.io/github/zalando/postgres-operator?branch=master)

 <img src="docs/diagrams/logo.png" width="200">

-The Postgres Operator enables highly-available [PostgreSQL](https://www.postgresql.org/)
-clusters on Kubernetes (K8s) powered by [Patroni](https://github.com/zalando/spilo).
-It is configured only through manifests to ease integration into automated CI/CD
-pipelines with no access to Kubernetes directly.
+The Postgres Operator delivers easy-to-run, highly-available [PostgreSQL](https://www.postgresql.org/)
+clusters on Kubernetes (K8s) powered by [Patroni](https://github.com/zalando/spilo).
+It is configured only through Postgres manifests (CRDs) to ease integration into automated CI/CD
+pipelines with no direct access to the Kubernetes API, promoting infrastructure as code over manual operations.

 ### Operator features

-* Rolling updates on Postgres cluster changes
-* Volume resize without Pod restarts
-* Database connection pooler
-* Cloning Postgres clusters
-* Logical backups to S3 Bucket
+* Rolling updates on Postgres cluster changes, incl. quick minor version updates
+* Live volume resize without pod restarts (AWS EBS, PVC)
+* Database connection pooler with PGBouncer
+* Restore and cloning Postgres clusters (incl. major version upgrade)
+* Additionally logical backups to S3 bucket can be configured
 * Standby cluster from S3 WAL archive
 * Configurable for non-cloud environments
+* Basic credential and user management on K8s, eases application deployments
+* Support for custom TLS certificates
 * UI to create and edit Postgres cluster manifests
+* Works well on Amazon AWS, Google Cloud, OpenShift and locally on Kind
+* Base support for AWS EBS gp3 migration (iops, throughput pending)

 ### PostgreSQL features

-* Supports PostgreSQL 9.6+
+* Supports PostgreSQL 13, starting from 9.5+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
   [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /
@@ -48,13 +50,35 @@ pipelines with no access to Kubernetes directly.
   [timescaledb](https://github.com/timescale/timescaledb)

 The Postgres Operator has been developed at Zalando and is being used in
-production for over two years.
+production for over three years.
## Notes on Postgres 13 support

If you are new to the operator, you can skip this section and just start using the Postgres operator as is; Postgres 13 is ready to go.

The Postgres operator supports Postgres 13 with the new Spilo image that also includes a recent Patroni version to support PG13 settings.
More work on optimizing restarts and rolling upgrades is pending.

If you are already running an older version of the Postgres operator with a Spilo 12 Docker image, you need to be aware of the changes to the backup path.
We introduced the major version into the backup path to smooth the [major version upgrade](docs/administrator.md#minor-and-major-version-upgrade) that is now supported manually.
The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for WAL segments in the current path but also in old-format paths.
This comes at a potential performance cost and should be disabled after a few days.

The new Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p2`

The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
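The compatibility flag sits at the top level of the operator configuration. A minimal sketch of an `OperatorConfiguration` fragment for the Spilo 12 to 13 transition (the metadata name is illustrative; flag and image names are taken from the CRD and chart values in this commit):

```yaml
apiVersion: acid.zalan.do/v1
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration  # illustrative name
configuration:
  # run the new Spilo 13 image ...
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
  # ... but let Spilo also look for WAL segments in the old, version-less path;
  # switch this off again after a few days to avoid the performance penalty
  enable_spilo_wal_path_compat: true
```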
## Getting started

For a quick first impression follow the instructions of this
[tutorial](docs/quickstart.md).

## Supported setups of Postgres and Applications

![Features](docs/diagrams/neutral_operator.png)

## Documentation

There is a browser-friendly version of this documentation at
@@ -70,12 +94,6 @@ There is a browser-friendly version of this documentation at
 * [Postgres manifest reference](docs/reference/cluster_manifest.md)
 * [Command-line options and environment variables](docs/reference/command_line_and_environment.md)

-## Google Summer of Code
-
-The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)!
-Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019)
-and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).
-
 ## Community

 There are two places to get in touch with the community:


@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator-ui
-version: 1.5.0
-appVersion: 1.5.0
+version: 1.6.0
+appVersion: 1.6.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:


@@ -2,11 +2,34 @@ apiVersion: v1
 entries:
   postgres-operator-ui:
   - apiVersion: v1
-    appVersion: 1.5.0
-    created: "2020-05-08T12:07:31.651762139+02:00"
+    appVersion: 1.6.0
+    created: "2020-12-18T14:19:25.464717041+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
-    digest: d7a36de8a3716f7b7954e2e51ed07863ea764dbb129a2fd3ac6a453f9e98a115
+    digest: d7813a235dd1015377c38fd5a14e7679a411c7340a25cfcf5f5294405f9a2eb2
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.6.0.tgz
+    version: 1.6.0
+  - apiVersion: v1
+    appVersion: 1.5.0
+    created: "2020-12-18T14:19:25.464015993+01:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
     home: https://github.com/zalando/postgres-operator
     keywords:
     - postgres
@@ -24,29 +47,4 @@ entries:
     urls:
     - postgres-operator-ui-1.5.0.tgz
     version: 1.5.0
-  - apiVersion: v1
-    appVersion: 1.4.0
-    created: "2020-05-08T12:07:31.651223951+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    - email: sk@sik-net.de
-      name: siku4
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.4.0.tgz
-    version: 1.4.0
-generated: "2020-05-08T12:07:31.650495247+02:00"
+generated: "2020-12-18T14:19:25.463104102+01:00"


@@ -21,6 +21,10 @@ spec:
         team: "acid" # Parameterize?
     spec:
       serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+      {{- end }}
       containers:
         - name: "service"
           image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
@@ -46,7 +50,7 @@ spec:
             - name: "RESOURCES_VISIBLE"
              value: "{{ .Values.envs.resourcesVisible }}"
             - name: "TARGET_NAMESPACE"
-              value: {{ .Values.envs.targetNamespace }}
+              value: "{{ .Values.envs.targetNamespace }}"
             - name: "TEAMS"
               value: |-
                 [
@@ -64,10 +68,8 @@ spec:
                   "resources_visible": true,
                   "users_visible": true,
                   "postgresql_versions": [
+                    "13",
                     "12",
-                    "11",
-                    "10",
-                    "9.6",
-                    "9.5"
+                    "11"
                   ]
                 }


@@ -8,9 +8,15 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.5.0-dirty
+  tag: v1.6.0
   pullPolicy: "IfNotPresent"

+# Optionally specify an array of imagePullSecrets.
+# Secrets must be manually created in the namespace.
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+# imagePullSecrets:
+#   - name:
+
 rbac:
   # Specifies whether RBAC resources should be created
   create: true
@@ -43,7 +49,7 @@ envs:
 # configure UI service
 service:
   type: "ClusterIP"
-  port: "8081"
+  port: "80"
   # If the type of the service is NodePort a port can be specified using the nodePort field
   # If the nodePort field is not specified, or if it has no value, then a random port is used
   # notePort: 32521


@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator
-version: 1.5.0
-appVersion: 1.5.0
+version: 1.6.0
+appVersion: 1.6.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:


@@ -1,4 +1,4 @@
-apiVersion: apiextensions.k8s.io/v1beta1
+apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   name: operatorconfigurations.acid.zalan.do
@@ -15,365 +15,441 @@
     singular: operatorconfiguration
     shortNames:
     - opconfig
+    categories:
+    - all
-  additionalPrinterColumns:
-  - name: Image
-    type: string
-    description: Spilo image to be used for Pods
-    JSONPath: .configuration.docker_image
-  - name: Cluster-Label
-    type: string
-    description: Label for K8s resources created by operator
-    JSONPath: .configuration.kubernetes.cluster_name_label
-  - name: Service-Account
-    type: string
-    description: Name of service account to be used
-    JSONPath: .configuration.kubernetes.pod_service_account_name
-  - name: Min-Instances
-    type: integer
-    description: Minimum number of instances per Postgres cluster
-    JSONPath: .configuration.min_instances
-  - name: Age
-    type: date
-    JSONPath: .metadata.creationTimestamp
   scope: Namespaced
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    subresources:
+      status: {}
+    additionalPrinterColumns:
+    - name: Image
+      type: string
+      description: Spilo image to be used for Pods
+      jsonPath: .configuration.docker_image
+    - name: Cluster-Label
+      type: string
+      description: Label for K8s resources created by operator
+      jsonPath: .configuration.kubernetes.cluster_name_label
+    - name: Service-Account
+      type: string
+      description: Name of service account to be used
+      jsonPath: .configuration.kubernetes.pod_service_account_name
+    - name: Min-Instances
+      type: integer
+      description: Minimum number of instances per Postgres cluster
+      jsonPath: .configuration.min_instances
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    schema:
+      openAPIV3Schema:
+        type: object
+        required:
+        - kind
+        - apiVersion
+        - configuration
+        properties:
+          kind:
-  subresources:
-    status: {}
-  version: v1
-  validation:
-    openAPIV3Schema:
-      type: object
-      required:
-      - kind
-      - apiVersion
-      - configuration
-      properties:
-        kind:
-          type: string
-          enum:
-          - OperatorConfiguration
-        apiVersion:
-          type: string
-          enum:
-          - acid.zalan.do/v1
-        configuration:
-          type: object
-          properties:
-            docker_image:
-              type: string
-            enable_crd_validation:
-              type: boolean
-            enable_lazy_spilo_upgrade:
-              type: boolean
-            enable_shm_volume:
-              type: boolean
-            etcd_host:
-              type: string
-            kubernetes_use_configmaps:
-              type: boolean
-            max_instances:
type: integer
minimum: -1 # -1 = disabled
min_instances:
type: integer
minimum: -1 # -1 = disabled
resync_period:
type: string
repair_period:
type: string
set_memory_request_to_limit:
type: boolean
sidecar_docker_images:
type: object
additionalProperties:
type: string
sidecars:
type: array
nullable: true
items:
type: object
additionalProperties: true
workers:
type: integer
minimum: 1
users:
type: object
properties:
replication_username:
type: string
super_username:
type: string
kubernetes:
type: object
properties:
cluster_domain:
type: string
cluster_labels:
type: object
additionalProperties:
type: string
cluster_name_label:
type: string
custom_pod_annotations:
type: object
additionalProperties:
type: string
downscaler_annotations:
type: array
items:
type: string
enable_init_containers:
type: boolean
enable_pod_antiaffinity:
type: boolean
enable_pod_disruption_budget:
type: boolean
enable_sidecars:
type: boolean
infrastructure_roles_secret_name:
type: string
inherited_labels:
type: array
items:
type: string
master_pod_move_timeout:
type: string
node_readiness_label:
type: object
additionalProperties:
type: string
oauth_token_secret_name:
type: string
pdb_name_format:
type: string
pod_antiaffinity_topology_key:
type: string
pod_environment_configmap:
type: string
pod_management_policy:
type: string
enum:
- "ordered_ready"
- "parallel"
pod_priority_class_name:
type: string
pod_role_label:
type: string
pod_service_account_definition:
type: string
pod_service_account_name:
type: string
pod_service_account_role_binding_definition:
type: string
pod_terminate_grace_period:
type: string
secret_name_template:
type: string
spilo_fsgroup:
type: integer
spilo_privileged:
type: boolean
toleration:
type: object
additionalProperties:
type: string
watched_namespace:
type: string
postgres_pod_resources:
type: object
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
timeouts:
type: object
properties:
pod_label_wait_timeout:
type: string
pod_deletion_wait_timeout:
type: string
ready_wait_interval:
type: string
ready_wait_timeout:
type: string
resource_check_interval:
type: string
resource_check_timeout:
type: string
load_balancer:
type: object
properties:
custom_service_annotations:
type: object
additionalProperties:
type: string
db_hosted_zone:
type: string
enable_master_load_balancer:
type: boolean
enable_replica_load_balancer:
type: boolean
master_dns_name_format:
type: string
replica_dns_name_format:
type: string
aws_or_gcp:
type: object
properties:
additional_secret_mount:
type: string
additional_secret_mount_path:
type: string
aws_region:
type: string
kube_iam_role:
type: string
log_s3_bucket:
type: string
wal_s3_bucket:
type: string
logical_backup:
type: object
properties:
logical_backup_docker_image:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:
type: string
logical_backup_s3_endpoint:
type: string
logical_backup_s3_region:
type: string
logical_backup_s3_secret_access_key:
type: string
logical_backup_s3_sse:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
debug:
type: object
properties:
debug_logging:
type: boolean
enable_database_access:
type: boolean
teams_api:
type: object
properties:
enable_admin_role_for_users:
type: boolean
enable_team_superuser:
type: boolean
enable_teams_api:
type: boolean
pam_configuration:
type: string
pam_role_name:
type: string
postgres_superuser_teams:
type: array
items:
type: string
protected_role_names:
type: array
items:
type: string
team_admin_role:
type: string
team_api_role_configuration:
type: object
additionalProperties:
type: string
teams_api_url:
type: string
logging_rest_api:
type: object
properties:
api_port:
type: integer
cluster_history_entries:
type: integer
ring_log_lines:
type: integer
scalyr: # deprecated
type: object
properties:
scalyr_api_key:
type: string
scalyr_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_image:
type: string
scalyr_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_server_url:
type: string
connection_pooler:
type: object
properties:
connection_pooler_schema:
type: string
#default: "pooler"
connection_pooler_user:
type: string
#default: "pooler"
connection_pooler_image:
type: string
#default: "registry.opensource.zalan.do/acid/pgbouncer"
connection_pooler_max_db_connections:
type: integer
#default: 60
connection_pooler_mode:
type: string
enum:
- "session"
- "transaction"
#default: "transaction"
connection_pooler_number_of_instances:
type: integer
minimum: 2
#default: 2
connection_pooler_default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "1"
connection_pooler_default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "500m"
connection_pooler_default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
connection_pooler_default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
status:
type: object
additionalProperties:
type: string type: string
enum:
- OperatorConfiguration
apiVersion:
type: string
enum:
- acid.zalan.do/v1
configuration:
type: object
properties:
docker_image:
type: string
enable_crd_validation:
type: boolean
enable_lazy_spilo_upgrade:
type: boolean
enable_pgversion_env_var:
type: boolean
enable_shm_volume:
type: boolean
enable_spilo_wal_path_compat:
type: boolean
etcd_host:
type: string
kubernetes_use_configmaps:
type: boolean
max_instances:
type: integer
minimum: -1 # -1 = disabled
min_instances:
type: integer
minimum: -1 # -1 = disabled
resync_period:
type: string
repair_period:
type: string
set_memory_request_to_limit:
type: boolean
sidecar_docker_images:
type: object
additionalProperties:
type: string
sidecars:
type: array
nullable: true
items:
type: object
x-kubernetes-preserve-unknown-fields: true
workers:
type: integer
minimum: 1
users:
type: object
properties:
replication_username:
type: string
super_username:
type: string
kubernetes:
type: object
properties:
cluster_domain:
type: string
cluster_labels:
type: object
additionalProperties:
type: string
cluster_name_label:
type: string
custom_pod_annotations:
type: object
additionalProperties:
type: string
delete_annotation_date_key:
type: string
delete_annotation_name_key:
type: string
downscaler_annotations:
type: array
items:
type: string
enable_init_containers:
type: boolean
enable_pod_antiaffinity:
type: boolean
enable_pod_disruption_budget:
type: boolean
enable_sidecars:
type: boolean
infrastructure_roles_secret_name:
type: string
infrastructure_roles_secrets:
type: array
nullable: true
items:
type: object
required:
- secretname
- userkey
- passwordkey
properties:
secretname:
type: string
userkey:
type: string
passwordkey:
type: string
rolekey:
type: string
defaultuservalue:
type: string
defaultrolevalue:
type: string
details:
type: string
template:
type: boolean
inherited_annotations:
type: array
items:
type: string
inherited_labels:
type: array
items:
type: string
master_pod_move_timeout:
type: string
node_readiness_label:
type: object
additionalProperties:
type: string
oauth_token_secret_name:
type: string
pdb_name_format:
type: string
pod_antiaffinity_topology_key:
type: string
pod_environment_configmap:
type: string
pod_environment_secret:
type: string
pod_management_policy:
type: string
enum:
- "ordered_ready"
- "parallel"
pod_priority_class_name:
type: string
pod_role_label:
type: string
pod_service_account_definition:
type: string
pod_service_account_name:
type: string
pod_service_account_role_binding_definition:
type: string
pod_terminate_grace_period:
type: string
secret_name_template:
type: string
spilo_runasuser:
type: integer
spilo_runasgroup:
type: integer
spilo_fsgroup:
type: integer
spilo_privileged:
type: boolean
storage_resize_mode:
type: string
enum:
- "ebs"
- "pvc"
- "off"
toleration:
type: object
additionalProperties:
type: string
watched_namespace:
type: string
postgres_pod_resources:
type: object
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
timeouts:
type: object
properties:
pod_label_wait_timeout:
type: string
pod_deletion_wait_timeout:
type: string
ready_wait_interval:
type: string
ready_wait_timeout:
type: string
resource_check_interval:
type: string
resource_check_timeout:
type: string
load_balancer:
type: object
properties:
custom_service_annotations:
type: object
additionalProperties:
type: string
db_hosted_zone:
type: string
enable_master_load_balancer:
type: boolean
enable_replica_load_balancer:
type: boolean
external_traffic_policy:
type: string
enum:
- "Cluster"
- "Local"
master_dns_name_format:
type: string
replica_dns_name_format:
type: string
aws_or_gcp:
type: object
properties:
additional_secret_mount:
type: string
additional_secret_mount_path:
type: string
aws_region:
type: string
enable_ebs_gp3_migration:
type: boolean
enable_ebs_gp3_migration_max_size:
type: integer
gcp_credentials:
type: string
kube_iam_role:
type: string
log_s3_bucket:
type: string
wal_gs_bucket:
type: string
wal_s3_bucket:
type: string
logical_backup:
type: object
properties:
logical_backup_docker_image:
type: string
logical_backup_google_application_credentials:
type: string
logical_backup_provider:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:
type: string
logical_backup_s3_endpoint:
type: string
logical_backup_s3_region:
type: string
logical_backup_s3_secret_access_key:
type: string
logical_backup_s3_sse:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
debug:
type: object
properties:
debug_logging:
type: boolean
enable_database_access:
type: boolean
teams_api:
type: object
properties:
enable_admin_role_for_users:
type: boolean
enable_postgres_team_crd:
type: boolean
enable_postgres_team_crd_superusers:
type: boolean
enable_team_superuser:
type: boolean
enable_teams_api:
type: boolean
pam_configuration:
type: string
pam_role_name:
type: string
postgres_superuser_teams:
type: array
items:
type: string
protected_role_names:
type: array
items:
type: string
team_admin_role:
type: string
team_api_role_configuration:
type: object
additionalProperties:
type: string
teams_api_url:
type: string
logging_rest_api:
type: object
properties:
api_port:
type: integer
cluster_history_entries:
type: integer
ring_log_lines:
type: integer
scalyr: # deprecated
type: object
properties:
scalyr_api_key:
type: string
scalyr_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_image:
type: string
scalyr_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_server_url:
type: string
connection_pooler:
type: object
properties:
connection_pooler_schema:
type: string
#default: "pooler"
connection_pooler_user:
type: string
#default: "pooler"
connection_pooler_image:
type: string
#default: "registry.opensource.zalan.do/acid/pgbouncer"
connection_pooler_max_db_connections:
type: integer
#default: 60
connection_pooler_mode:
type: string
enum:
- "session"
- "transaction"
#default: "transaction"
connection_pooler_number_of_instances:
type: integer
minimum: 2
#default: 2
connection_pooler_default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "1"
connection_pooler_default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "500m"
connection_pooler_default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
connection_pooler_default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
status:
type: object
additionalProperties:
type: string
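Among the new options above, `infrastructure_roles_secrets` supplements the single `infrastructure_roles_secret_name` string with a list of secret references. A minimal sketch of the corresponding `OperatorConfiguration` fragment, using the required keys from the schema (secret and key names are illustrative):

```yaml
configuration:
  kubernetes:
    infrastructure_roles_secrets:
    - secretname: "postgresql-infrastructure-roles"  # illustrative secret name
      userkey: "user"          # key in the secret holding the role name
      passwordkey: "password"  # key in the secret holding the password
      rolekey: "inrole"        # optional key holding a role to grant (illustrative)
```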

File diff suppressed because it is too large.


@@ -0,0 +1,72 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: postgresteams.acid.zalan.do
  labels:
    app.kubernetes.io/name: postgres-operator
  annotations:
    "helm.sh/hook": crd-install
spec:
  group: acid.zalan.do
  names:
    kind: PostgresTeam
    listKind: PostgresTeamList
    plural: postgresteams
    singular: postgresteam
    shortNames:
    - pgteam
    categories:
    - all
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    subresources:
      status: {}
    schema:
      openAPIV3Schema:
        type: object
        required:
        - kind
        - apiVersion
        - spec
        properties:
          kind:
            type: string
            enum:
            - PostgresTeam
          apiVersion:
            type: string
            enum:
            - acid.zalan.do/v1
          spec:
            type: object
            properties:
              additionalSuperuserTeams:
                type: object
                description: "Map for teamId and associated additional superuser teams"
                additionalProperties:
                  type: array
                  nullable: true
                  description: "List of teams to become Postgres superusers"
                  items:
                    type: string
              additionalTeams:
                type: object
                description: "Map for teamId and associated additional teams"
                additionalProperties:
                  type: array
                  nullable: true
                  description: "List of teams whose members will also be added to the Postgres cluster"
                  items:
                    type: string
              additionalMembers:
                type: object
                description: "Map for teamId and associated additional users"
                additionalProperties:
                  type: array
                  nullable: true
                  description: "List of users who will also be added to the Postgres cluster"
                  items:
                    type: string
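A minimal sketch of a `PostgresTeam` manifest matching this schema (team and user names are illustrative; `kind` and `apiVersion` follow the enums above):

```yaml
apiVersion: acid.zalan.do/v1
kind: PostgresTeam
metadata:
  name: custom-team-membership   # illustrative name
spec:
  additionalSuperuserTeams:
    acid:                        # teamId
    - postgres_superusers        # members of this team become superusers
  additionalTeams:
    acid:
    - database-admins            # members of this team are added as regular users
  additionalMembers:
    acid:
    - external-contractor        # single users added to the team's clusters
```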


@@ -2,11 +2,33 @@ apiVersion: v1
 entries:
   postgres-operator:
   - apiVersion: v1
-    appVersion: 1.5.0
-    created: "2020-05-04T16:36:19.646719041+02:00"
+    appVersion: 1.6.0
+    created: "2020-12-17T16:16:25.639708821+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
-    digest: 43510e4ed7005b2b80708df24cfbb0099b263b4a2954cff4e8f305543760be6d
+    digest: 2f5f527bae0a22b02f2f7b1e2352665cecf489a990e18212444fa34450b97604
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.6.0.tgz
+    version: 1.6.0
+  - apiVersion: v1
+    appVersion: 1.5.0
+    created: "2020-12-17T16:16:25.637262877+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
     home: https://github.com/zalando/postgres-operator
     keywords:
     - postgres
@@ -23,26 +45,4 @@ entries:
     urls:
     - postgres-operator-1.5.0.tgz
     version: 1.5.0
-  - apiVersion: v1
-    appVersion: 1.4.0
-    created: "2020-05-04T16:36:19.645338751+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: f8b90fecfc3cb825b94ed17edd9d5cefc36ae61801d4568597b4a79bcd73b2e9
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.4.0.tgz
-    version: 1.4.0
-generated: "2020-05-04T16:36:19.643857452+02:00"
+generated: "2020-12-17T16:16:25.635647131+01:00"

Binary file not shown.


@@ -10,6 +10,27 @@ metadata:
     app.kubernetes.io/instance: {{ .Release.Name }}
 rules:
 # Patroni needs to watch and manage endpoints
+{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - get
+{{- else }}
 - apiGroups:
   - ""
   resources:
@@ -23,6 +44,7 @@ rules:
   - patch
   - update
   - watch
+{{- end }}
 # Patroni needs to watch pods
 - apiGroups:
   - ""


@@ -25,6 +25,15 @@ rules:
   - patch
   - update
   - watch
+# operator only reads PostgresTeams
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresteams
+  verbs:
+  - get
+  - list
+  - watch
 # to create or get/update CRDs when starting up
 - apiGroups:
   - apiextensions.k8s.io
@@ -35,13 +44,6 @@ rules:
   - get
   - patch
   - update
-# to read configuration from ConfigMaps
-- apiGroups:
-  - ""
-  resources:
-  - configmaps
-  verbs:
-  - get
 # to send events to the CRs
 - apiGroups:
   - ""
@@ -54,7 +56,35 @@ rules:
   - patch
   - update
   - watch
-# to manage endpoints which are also used by Patroni
+# to manage endpoints/configmaps which are also used by Patroni
+{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - create
+  - delete
+  - deletecollection
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - get
+{{- else }}
+# to read configuration from ConfigMaps
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
 - apiGroups:
   - ""
   resources:
@@ -68,6 +98,7 @@ rules:
   - patch
   - update
   - watch
+{{- end }}
 # to CRUD secrets for database access
 - apiGroups:
   - ""
@@ -96,6 +127,10 @@ rules:
   - delete
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
+  - patch
+  - update
+{{- end }}
 # to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
@@ -104,7 +139,9 @@
   verbs:
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }}
   - update # only for resizing AWS volumes
+{{- end }}
 # to watch Spilo pods and do rolling updates. Creation via StatefulSet
 - apiGroups:
   - ""


@@ -9,6 +9,9 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
 data:
+  {{- if .Values.podPriorityClassName }}
+  pod_priority_class_name: {{ .Values.podPriorityClassName }}
+  {{- end }}
   pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
 {{ toYaml .Values.configGeneral | indent 2 }}
 {{ toYaml .Values.configUsers | indent 2 }}


@@ -37,6 +37,10 @@ spec:
         image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
         imagePullPolicy: {{ .Values.image.pullPolicy }}
         env:
+        {{- if .Values.enableJsonLogging }}
+        - name: ENABLE_JSON_LOGGING
+          value: "true"
+        {{- end }}
         {{- if eq .Values.configTarget "ConfigMap" }}
         - name: CONFIG_MAP_NAME
           value: {{ template "postgres-operator.fullname" . }}


@@ -13,6 +13,9 @@ configuration:
   users:
 {{ toYaml .Values.configUsers | indent 4 }}
   kubernetes:
+    {{- if .Values.podPriorityClassName }}
+    pod_priority_class_name: {{ .Values.podPriorityClassName }}
+    {{- end }}
     pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
 {{ toYaml .Values.configKubernetes | indent 4 }}


@@ -0,0 +1,15 @@
{{- if .Values.podPriorityClassName }}
apiVersion: scheduling.k8s.io/v1
description: 'Use only for databases controlled by Postgres operator'
kind: PriorityClass
metadata:
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ .Values.podPriorityClassName }}
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000
{{- end }}
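This PriorityClass is only rendered when a name is set in the chart values, which also passes the name on to the operator as `pod_priority_class_name`. A minimal sketch (the class name is illustrative):

```yaml
# chart values; creates the PriorityClass above and assigns it to database pods
podPriorityClassName: "postgres-database-pods"  # illustrative name
```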


@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.5.0
+  tag: v1.6.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -21,14 +21,18 @@ configGeneral:
   enable_crd_validation: true
   # update only the statefulsets without immediately doing the rolling update
   enable_lazy_spilo_upgrade: false
+  # set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
+  enable_pgversion_env_var: true
   # start any new database pod without limitations on shm memory
   enable_shm_volume: true
+  # enables backwards compatible path between Spilo 12 and Spilo 13 images
+  enable_spilo_wal_path_compat: false
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
   # max number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # min number of instances in Postgres cluster. -1 = no limit
@@ -45,7 +49,7 @@ configGeneral:
   # example: "exampleimage:exampletag"
   # number of routines the operator spawns to process requests concurrently
-  workers: 4
+  workers: 8

 # parameters describing Postgres users
 configUsers:
@@ -67,6 +71,12 @@ configKubernetes:
   #   keya: valuea
   #   keyb: valueb
+  # key name for annotation that compares manifest value with current date
+  # delete_annotation_date_key: "delete-date"
+
+  # key name for annotation that compares manifest value with cluster name
+  # delete_annotation_name_key: "delete-clustername"
+
   # list of annotations propagated from cluster manifest to statefulset and deployment
   # downscaler_annotations:
   # - deployment-time
@@ -83,7 +93,11 @@ configKubernetes:
   # namespaced name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  # list of labels that can be inherited from the cluster manifest
+  # list of annotation keys that can be inherited from the cluster manifest
+  # inherited_annotations:
+  # - owned-by
+
+  # list of label keys that can be inherited from the cluster manifest
   # inherited_labels:
   # - application
   # - environment
@@ -104,6 +118,8 @@ configKubernetes:
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
   # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"
   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"
@@ -119,11 +135,16 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103
   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
+  # storage resize strategy, available options are: ebs, pvc, off
+  storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*"  # listen to all namespaces
@@ -170,6 +191,8 @@ configLoadBalancer:
   enable_master_load_balancer: false
   # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: false
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: "{cluster}.{team}.{hostedzone}"
   # defines the DNS name string template for the replica load balancer cluster
@@ -202,19 +225,30 @@ configAwsOrGcp:
   # AWS region used to store EBS volumes
   aws_region: eu-central-1
+  # enable automatic migration on AWS from gp2 to gp3 volumes
+  enable_ebs_gp3_migration: false
+  # defines maximum volume size in GB until which auto migration happens
+  # enable_ebs_gp3_migration_max_size: 1000
+
+  # GCP credentials that will be used by the operator / pods
+  # gcp_credentials: ""
+
   # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
   # kube_iam_role: ""
   # S3 bucket to use for shipping postgres daily logs
   # log_s3_bucket: ""
+  # GCS bucket to use for shipping WAL segments with WAL-E
+  # wal_gs_bucket: ""
+
   # S3 bucket to use for shipping WAL segments with WAL-E
   # wal_s3_bucket: ""

 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
   # S3 Access Key ID
   logical_backup_s3_access_key_id: ""
   # S3 bucket to store backup results
@@ -235,6 +269,11 @@ configTeamsApi:
   # team_admin_role will have the rights to grant roles coming from PG manifests
   # enable_admin_role_for_users: true
+  # operator watches for PostgresTeam CRs to assign additional teams and members to clusters
+  enable_postgres_team_crd: false
+  # toggle to create additional superuser teams from PostgresTeam CRs
+  # enable_postgres_team_crd_superusers: false
+
   # toggle to grant superuser to team members created from the Teams API
   enable_team_superuser: false
   # toggles usage of the Teams API by the operator
@@ -265,7 +304,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode
@@ -299,8 +338,12 @@ podServiceAccount:
   # If not set a name is generated using the fullname template and "-pod" suffix
   name: "postgres-pod"

+# priority class for operator pod
 priorityClassName: ""

+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
   limits:
     cpu: 500m
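The `delete_annotation_date_key` / `delete_annotation_name_key` options above guard cluster deletion: the operator only honors a delete when the cluster manifest carries matching annotations. A minimal sketch of a cluster manifest using the key names from the commented defaults (cluster name and date are illustrative):

```yaml
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-test-cluster            # illustrative cluster name
  annotations:
    delete-date: "2020-12-31"        # must match the current date for deletion to proceed
    delete-clustername: "acid-test-cluster"  # must match the cluster's own name
```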


@ -1,7 +1,7 @@
image: image:
registry: registry.opensource.zalan.do registry: registry.opensource.zalan.do
repository: acid/postgres-operator repository: acid/postgres-operator
tag: v1.5.0 tag: v1.6.0
pullPolicy: "IfNotPresent" pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets. # Optionally specify an array of imagePullSecrets.
@ -15,20 +15,27 @@ podLabels: {}
configTarget: "ConfigMap" configTarget: "ConfigMap"
# JSON logging format
enableJsonLogging: false
# general configuration parameters # general configuration parameters
configGeneral: configGeneral:
# choose if deployment creates/updates CRDs with OpenAPIV3Validation # choose if deployment creates/updates CRDs with OpenAPIV3Validation
enable_crd_validation: "true" enable_crd_validation: "true"
# update only the statefulsets without immediately doing the rolling update # update only the statefulsets without immediately doing the rolling update
enable_lazy_spilo_upgrade: "false" enable_lazy_spilo_upgrade: "false"
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
enable_pgversion_env_var: "true"
# start any new database pod without limitations on shm memory # start any new database pod without limitations on shm memory
enable_shm_volume: "true" enable_shm_volume: "true"
# enables backwards compatible path between Spilo 12 and Spilo 13 images
enable_spilo_wal_path_compat: "false"
# etcd connection string for Patroni. Empty uses K8s-native DCS. # etcd connection string for Patroni. Empty uses K8s-native DCS.
etcd_host: "" etcd_host: ""
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s) # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
# kubernetes_use_configmaps: "false" # kubernetes_use_configmaps: "false"
# Spilo docker image # Spilo docker image
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3 docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
# max number of instances in Postgres cluster. -1 = no limit # max number of instances in Postgres cluster. -1 = no limit
min_instances: "-1" min_instances: "-1"
# min number of instances in Postgres cluster. -1 = no limit # min number of instances in Postgres cluster. -1 = no limit
@ -44,7 +51,7 @@ configGeneral:
# sidecar_docker_images: "" # sidecar_docker_images: ""
# number of routines the operator spawns to process requests concurrently # number of routines the operator spawns to process requests concurrently
workers: "4" workers: "8"
# parameters describing Postgres users # parameters describing Postgres users
configUsers: configUsers:
@ -63,6 +70,12 @@ configKubernetes:
# annotations attached to each database pod # annotations attached to each database pod
# custom_pod_annotations: "keya:valuea,keyb:valueb" # custom_pod_annotations: "keya:valuea,keyb:valueb"
# key name for annotation that compares manifest value with current date
# delete_annotation_date_key: "delete-date"
# key name for annotation that compares manifest value with cluster name
# delete_annotation_name_key: "delete-clustername"
# list of annotations propagated from cluster manifest to statefulset and deployment
# downscaler_annotations: "deployment-time,downscaler/*"
@ -77,7 +90,10 @@ configKubernetes:
# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles
# list of annotation keys that can be inherited from the cluster manifest
# inherited_annotations: owned-by
# list of label keys that can be inherited from the cluster manifest
# inherited_labels: application,environment
# timeout for successful migration of master pods from unschedulable node
@ -95,6 +111,8 @@ configKubernetes:
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
# namespaced name of the ConfigMap with environment variables to populate on every pod
# pod_environment_configmap: "default/my-custom-config"
# name of the Secret (in cluster namespace) with environment variables to populate on every pod
# pod_environment_secret: "my-custom-secret"
# specify the pod management policy of stateful sets of Postgres clusters
pod_management_policy: "ordered_ready"
@ -110,11 +128,16 @@ configKubernetes:
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: "101"
# spilo_runasgroup: "103"
# group ID with write-access to volumes (required to run Spilo as non-root process)
# spilo_fsgroup: "103"
# whether the Spilo container should run in privileged mode
spilo_privileged: "false"
# storage resize strategy, available options are: ebs, pvc, off
storage_resize_mode: pvc
# operator watches for postgres objects in the given namespace
watched_namespace: "*" # listen to all namespaces
@ -159,6 +182,8 @@ configLoadBalancer:
enable_master_load_balancer: "false"
# toggles service type load balancer pointing to the replica pod of the cluster
enable_replica_load_balancer: "false"
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster
master_dns_name_format: '{cluster}.{team}.{hostedzone}'
# defines the DNS name string template for the replica load balancer cluster
@ -191,6 +216,14 @@ configAwsOrGcp:
# AWS region used to store EBS volumes
aws_region: eu-central-1
# enable automatic migration on AWS from gp2 to gp3 volumes
enable_ebs_gp3_migration: "false"
# defines maximum volume size in GB until which auto migration happens
# enable_ebs_gp3_migration_max_size: "1000"
# GCP credentials for setting the GOOGLE_APPLICATION_CREDENTIALS environment variable
# gcp_credentials: ""
# AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
# kube_iam_role: ""
@ -200,10 +233,13 @@ configAwsOrGcp:
# S3 bucket to use for shipping WAL segments with WAL-E
# wal_s3_bucket: ""
# GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: ""
# configure K8s cron job managed by the operator
configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall)
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v.1.6.0"
# S3 Access Key ID
logical_backup_s3_access_key_id: ""
# S3 bucket to store backup results
@ -224,6 +260,11 @@ configTeamsApi:
# team_admin_role will have the rights to grant roles coming from PG manifests
# enable_admin_role_for_users: "true"
# operator watches for PostgresTeam CRs to assign additional teams and members to clusters
enable_postgres_team_crd: "false"
# toggle to create additional superuser teams from PostgresTeam CRs
# enable_postgres_team_crd_superusers: "false"
# toggle to grant superuser to team members created from the Teams API
# enable_team_superuser: "false"
@ -257,7 +298,7 @@ configConnectionPooler:
# db user for pooler to use
connection_pooler_user: "pooler"
# docker image
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
# max db connections the pooler should hold
connection_pooler_max_db_connections: "60"
# default pooling mode
@ -291,8 +332,12 @@ podServiceAccount:
# If not set a name is generated using the fullname template and "-pod" suffix
name: "postgres-pod"
# priority class for operator pod
priorityClassName: ""
# priority class for database pods
podPriorityClassName: ""
resources:
limits:
cpu: 500m


@ -2,7 +2,7 @@ package main
import (
"flag"
log "github.com/sirupsen/logrus"
"os"
"os/signal"
"sync"
@ -36,6 +36,8 @@ func init() {
flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
flag.Parse()
config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
configMapRawName := os.Getenv("CONFIG_MAP_NAME")
if configMapRawName != "" {
@ -63,6 +65,9 @@ func init() {
func main() {
var err error
if config.EnableJsonLogging {
log.SetFormatter(&log.JSONFormatter{})
}
log.SetOutput(os.Stdout)
log.Printf("Spilo operator %s\n", version)


@ -2,6 +2,10 @@ version: "2017-09-20"
pipeline:
- id: build-postgres-operator
type: script
vm: large
cache:
paths:
- /go/pkg/mod
commands:
- desc: 'Update'
cmd: |
@ -12,7 +16,7 @@ pipeline:
- desc: 'Install go'
cmd: |
cd /tmp
wget -q https://storage.googleapis.com/golang/go1.15.6.linux-amd64.tar.gz -O go.tar.gz
tar -xf go.tar.gz
mv go /usr/local
ln -s /usr/local/go/bin/go /usr/bin/go
@ -28,7 +32,7 @@ pipeline:
IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test
fi
export IMAGE
make deps mocks docker
- desc: 'Run unit tests'
cmd: |
export PATH=$PATH:$HOME/go/bin
@ -76,3 +80,15 @@ pipeline:
export IMAGE
make docker
make push
- id: build-logical-backup
type: script
commands:
- desc: Build image
cmd: |
cd docker/logical-backup
export TAG=$(git describe --tags --always --dirty)
IMAGE="registry-write.opensource.zalan.do/acid/logical-backup"
docker build --rm -t "$IMAGE:$TAG$CDP_TAG" .
docker push "$IMAGE:$TAG$CDP_TAG"


@ -2,6 +2,7 @@ FROM alpine
MAINTAINER Team ACID @ Zalando <team-acid@zalando.de>
# We need root certificates to deal with teams api over https
RUN apk --no-cache add curl
RUN apk --no-cache add ca-certificates
COPY build/* /


@ -13,12 +13,16 @@ RUN apt-get update \
curl \
jq \
gnupg \
gcc \
libffi-dev \
&& pip3 install --no-cache-dir awscli --upgrade \
&& pip3 install --no-cache-dir gsutil --upgrade \
&& echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
&& cat /etc/apt/sources.list.d/pgdg.list \
&& curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
postgresql-client-13 \
postgresql-client-12 \
postgresql-client-11 \
postgresql-client-10 \


@ -46,6 +46,23 @@ function aws_upload {
aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}"
}
function gcs_upload {
PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP"
}
function upload {
case $LOGICAL_BACKUP_PROVIDER in
"gcs")
gcs_upload
;;
*)
aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
;;
esac
}
function get_pods {
declare -r SELECTOR="$1"
@ -93,7 +110,7 @@ for search in "${search_strategy[@]}"; do
done
set -x
dump | compress | upload
[[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
set +x


@ -11,17 +11,29 @@ switchover (planned failover) of the master to the Pod with new minor version.
The switch should usually take less than 5 seconds, still clients have to
reconnect.
Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
or in-place.

With cloning, the new cluster manifest must have a higher `version` string than
the source cluster and will be created from a basebackup. Depending on the
cluster size, downtime in this case can be significant as writes to the database
should be stopped and all WAL files should be archived first before cloning is
started.
Starting with Spilo 13, Postgres Operator can do an in-place major version
upgrade, which should be faster than cloning. However, it is not fully
automatic yet. First, you need to make sure that setting the `PGVERSION`
environment variable is enabled in the configuration. Since `v1.6.0`,
`enable_pgversion_env_var` is enabled by default.

To trigger the upgrade, increase the version in the cluster manifest. After
Pods are rotated `configure_spilo` will notice the version mismatch and start
the old version again. You can then exec into the Postgres container of the
master instance and call `python3 /scripts/inplace_upgrade.py N` where `N`
is the number of members of your cluster (see `number_of_instances`). The
upgrade is usually fast, well under one minute for most DBs. Note that changes
become irreversible once `pg_upgrade` is called. To understand the upgrade
procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
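For illustration, the manual step could look like this (a sketch assuming a
three-instance cluster named `acid-minimal-cluster` whose master runs in pod
`acid-minimal-cluster-0`):

```bash
# run the in-place upgrade script inside the master's Postgres container;
# the argument is the cluster's number_of_instances (3 assumed here)
kubectl exec -it acid-minimal-cluster-0 -- python3 /scripts/inplace_upgrade.py 3
```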
## CRD Validation
@ -44,7 +56,7 @@ Once the validation is enabled it can only be disabled manually by editing or
patching the CRD manifest:

```bash
kubectl patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
```
## Non-default cluster domain
@ -123,6 +135,68 @@ Every other Postgres cluster which lacks the annotation will be ignored by this
operator. Conversely, operators without a defined `CONTROLLER_ID` will ignore
clusters with defined ownership of another operator.
## Delete protection via annotations
To avoid accidental deletion of Postgres clusters the operator can check the
manifest for two existing annotations containing the cluster name and/or the
current date (in YYYY-MM-DD format). The names of the annotation keys can be
defined in the configuration. By default, they are not set, which disables the
delete protection. Thus, one could also choose to go with only one annotation.
**postgres-operator ConfigMap**
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  delete_annotation_date_key: "delete-date"
  delete_annotation_name_key: "delete-clustername"
```
**OperatorConfiguration**
```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  kubernetes:
    delete_annotation_date_key: "delete-date"
    delete_annotation_name_key: "delete-clustername"
```
Now, every cluster manifest must contain the configured annotation keys to
trigger the delete process when running `kubectl delete pg`. Note that the
`Postgresql` resource would still get deleted because the K8s API server does
not block it. Only the operator logs will tell that the delete criteria were
not met.
**cluster manifest**
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: demo-cluster
  annotations:
    delete-date: "2020-08-31"
    delete-clustername: "demo-cluster"
spec:
  ...
```
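If the keys were configured as in the examples above, the delete workflow
could be sketched on the command line like this (cluster name taken from the
example manifest):

```bash
# annotate with the expected values, then delete; the operator checks both keys
kubectl annotate postgresql demo-cluster delete-date="2020-08-31" delete-clustername="demo-cluster"
kubectl delete postgresql demo-cluster
```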
In case the resource has been deleted accidentally or the annotations were
simply forgotten, it's safe to recreate the cluster with `kubectl create`.
Existing Postgres clusters are not replaced by the operator. But as the
original cluster still exists, the status will show `CreateFailed` at first.
On the next sync event it should change to `Running`. However, as it is in
fact a new resource for K8s, the UID will differ, which can trigger a rolling
update of the pods because the UID is used as part of the backup path to S3.
## Role-based access control for the operator

The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
@ -319,11 +393,18 @@ spec:
## Custom Pod Environment Variables

It is possible to configure a ConfigMap as well as a Secret which are used by
the Postgres pods as an additional provider for environment variables. One use
case is to customize the Spilo image and configure it with environment
variables. Another case could be to provide custom cloud provider or backup
settings.

In general the operator will give preference to the globally configured
variables, to not have the custom ones interfere with core functionality.
Variables with the 'WAL_' and 'LOG_' prefix can be overwritten though, to
allow backup and log shipping to be specified differently.

### Via ConfigMap

The ConfigMap with the additional settings is referenced in the operator's
main configuration.
A namespace can be specified along with the name. If left out, the configured
default namespace of your K8s client will be used and if the ConfigMap is not
found there, the Postgres cluster's namespace is taken when different:
@ -365,7 +446,54 @@ data:
  MY_CUSTOM_VAR: value
```
The key-value pairs of the ConfigMap are then added as environment variables
to the Postgres StatefulSet/pods.
### Via Secret
The Secret with the additional variables is referenced in the operator's main
configuration. To protect the values of the secret from being exposed in the
pod spec they are each referenced as a SecretKeyRef. This does not allow the
secret to be in a different namespace than the pods, though.
**postgres-operator ConfigMap**
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # referencing secret with custom environment variables
  pod_environment_secret: postgres-pod-secrets
```
**OperatorConfiguration**
```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  kubernetes:
    # referencing secret with custom environment variables
    pod_environment_secret: postgres-pod-secrets
```
**referenced Secret `postgres-pod-secrets`**
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: postgres-pod-secrets
  namespace: default
data:
  MY_CUSTOM_VAR: dmFsdWU=
```
The key-value pairs of the Secret are all accessible as environment variables to the
Postgres StatefulSet/pods. Postgres StatefulSet/pods.
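To illustrate the SecretKeyRef mechanism mentioned above, the resulting entry
in the container spec would look roughly like this (a sketch, not actual
operator output; names taken from the example Secret):

```yaml
env:
  - name: MY_CUSTOM_VAR
    valueFrom:
      secretKeyRef:
        name: postgres-pod-secrets
        key: MY_CUSTOM_VAR
```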
## Limiting the number of min and max instances in clusters
@ -445,9 +573,12 @@ database.
* **Human users** originate from the [Teams API](user.md#teams-api-roles) that
returns a list of the team members given a team id. The operator differentiates
between (a) product teams that own a particular Postgres cluster and are granted
admin rights to maintain it, (b) Postgres superuser teams that get superuser
access to all Postgres databases running in a K8s cluster for the purposes of
maintaining and troubleshooting, and (c) additional teams, superuser teams or
members associated with the owning team. The latter is managed via the
[PostgresTeam CRD](user.md#additional-teams-and-members-per-cluster).
## Understanding rolling update of Spilo pods
@ -518,6 +649,83 @@ A secret can be pre-provisioned in different ways:
* Automatically provisioned via a custom K8s controller like
  [kube-aws-iam-controller](https://github.com/mikkeloscar/kube-aws-iam-controller)
## Google Cloud Platform setup
To configure the operator on GCP there are some prerequisites:
* A service account with the proper IAM setup to access the GCS bucket for the WAL-E logs
* The credentials file for the service account.
The configuration parameters that we will be using are:
* `additional_secret_mount`
* `additional_secret_mount_path`
* `gcp_credentials`
* `wal_gs_bucket`
### Generate a K8s secret resource
Generate the K8s secret resource that will contain your service account's
credentials. It's highly recommended to use a service account and limit its
scope to just the WAL-E bucket.
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: psql-wale-creds
  namespace: default
type: Opaque
stringData:
  key.json: |-
    <GCP .json credentials>
```
### Setup your operator configuration values
With the `psql-wale-creds` resource applied to your cluster, ensure that
the operator's configuration is set up like the following:
```yml
...
aws_or_gcp:
  additional_secret_mount: "psql-wale-creds"
  additional_secret_mount_path: "/var/secrets/google"  # or wherever you want to mount the file
  # aws_region: eu-central-1
  # kube_iam_role: ""
  # log_s3_bucket: ""
  # wal_s3_bucket: ""
  wal_gs_bucket: "postgres-backups-bucket-28302F2"  # name of the bucket where to save the WAL-E logs
  gcp_credentials: "/var/secrets/google/key.json"  # combination of the mount path & key in the K8s resource (i.e. key.json)
...
```
### Setup pod environment configmap
To make postgres-operator work with GCS, use the following configmap:
```yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: pod-env-overrides
  namespace: postgres-operator-system
data:
  # Any env variable used by spilo can be added
  USE_WALG_BACKUP: "true"
  USE_WALG_RESTORE: "true"
  CLONE_USE_WALG_RESTORE: "true"
```
This configmap will instruct the operator to use WAL-G, instead of WAL-E, for
backup and restore. Then provide this configmap in the postgres-operator
settings:
```yml
...
# namespaced name of the ConfigMap with environment variables to populate on every pod
pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
...
```
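To verify the overrides reached the database pods, one can check the container
environment (pod name assumed):

```bash
# the WAL-G toggles from the ConfigMap should appear in the pod's environment
kubectl exec acid-minimal-cluster-0 -- env | grep WALG
```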
## Sidecars for Postgres clusters

A list of sidecars is added to each cluster created by the operator. The default


@ -237,9 +237,11 @@ kubectl logs acid-minimal-cluster-0
## End-to-end tests
The operator provides reference end-to-end (e2e) tests to ensure various
infrastructure parts work smoothly together. The test code is available at
`e2e/tests`. The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner`
image is used to run the tests. The container mounts the local `e2e/tests`
directory at runtime, so whatever you modify in your local copy of the tests
will be executed by a test runner. By maintaining a separate test runner image
we avoid the need to re-build the e2e test image on every build.

Each e2e execution tests a Postgres Operator image built from the current git
branch. The test runner creates a new local K8s cluster using
[kind](https://kind.sigs.k8s.io/), utilizes provided manifest examples, and
runs e2e tests contained in the `tests` folder. The K8s API client in the
container connects to the `kind` cluster via
@ -284,7 +286,7 @@ manifest files:
Postgres manifest parameters are defined in the [api package](../pkg/apis/acid.zalan.do/v1/postgresql_type.go).
The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go).
Validation of CRD parameters is controlled in [crds.go](../pkg/apis/acid.zalan.do/v1/crds.go).
Please, reflect your changes in tests, for example in:
* [config_test.go](../pkg/util/config/config_test.go)
* [k8sres_test.go](../pkg/cluster/k8sres_test.go)

File diff suppressed because it is too large.

Binary file not shown (new image added, 849 KiB).


@ -1,63 +0,0 @@
<h1>Google Summer of Code 2019</h1>
## Applications steps
1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/)
2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application.
3. Select a project from the list of ideas below or propose your own.
4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details.
5. Submit proposal and the proof of enrollment before April 9 2019 18:00 UTC through the web site of the Program.
## Project ideas
### Place database pods into the "Guaranteed" Quality-of-Service class
* **Description**: Kubernetes runtime does not kill pods in this class on condition they stay within their resource limits, which is desirable for the DB pods serving production workloads. To be assigned to that class, pod's resources must equal its limits. The task is to add the `enableGuaranteedQoSClass` or the like option to the Postgres manifest and the operator configmap that forcibly re-write pod resources to match the limits.
* **Recommended skills**: golang, basic Kubernetes abstractions
* **Difficulty**: moderate
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
### Implement the kubectl plugin for the Postgres CustomResourceDefinition
* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command,
that can enable, for example, correct deletion or major version upgrade of Postgres clusters.
* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes
* **Difficulty**: moderate to medium, depending on the plugin design
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
### Implement the openAPIV3Schema for the Postgres CRD
* **Description**: at present the operator validates a database manifest on its own.
It will be helpful to reject erroneous manifests before they reach the operator using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation). It is up to the student to decide whether to write the schema manually or to adopt existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation).
* **Recommended skills**: golang, JSON schema
* **Difficulty**: medium
* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#388](https://github.com/zalando/postgres-operator/issues/388)
### Design a solution for the local testing of the operator
* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems:
First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing.
Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI.
A promising option is the Kubernetes own [kind](https://github.com/kubernetes-sigs/kind)
* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions
* **Difficulty**: medium to hard depending on the selected design
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#475](https://github.com/zalando/postgres-operator/issues/475)
### Detach a Postgres cluster from the operator for maintenance
* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster.
Currently the only way to achieve this behavior is to shutdown the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator control and thus is highly undesirable in production Kubernetes clusters. It would be much better to be able to detach only the desired Postgres cluster from the operator for the time being and re-attach it again after maintenance.
* **Recommended skills**: golang, architecture of a Kubernetes operator
* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases.
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#421](https://github.com/zalando/postgres-operator/issues/421)
### Propose your own idea
Feel free to come up with your own ideas. For inspiration,
see [our bug tracker](https://github.com/zalando/postgres-operator/issues),
the [official `CustomResourceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
and [other operators](https://github.com/operator-framework/awesome-operators).


@ -34,8 +34,8 @@ Postgres cluster. This can work in two ways: via a ConfigMap or a custom
The Postgres Operator can be deployed in the following ways:

* Manual deployment
* Kustomization
* Helm chart
* Operator Lifecycle Manager (OLM)
### Manual deployment setup
@ -91,20 +91,6 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will
be skipped with warning when using v3. Documentation for installing applications
with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/).
### Operator Lifecycle Manager (OLM)
The [Operator Lifecycle Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager)
has been designed to facilitate management of K8s operators. It has to be
installed in your K8s environment. When OLM is set up simply download and deploy
the Postgres Operator with the following command:
```bash
kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
```
This installs the operator in the `operators` namespace. More information can be
found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).
## Check if Postgres Operator is running

Starting the operator may take a few seconds. Check if the operator pod is
@ -142,6 +128,9 @@ To deploy the UI simply apply all its manifests files or use the UI helm chart:
# manual deployment
kubectl apply -f ui/manifests/
# or kustomization
kubectl apply -k github.com/zalando/postgres-operator/ui/manifests
# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```
@ -160,7 +149,7 @@ You can now access the web interface by port forwarding the UI pod (mind the
label selector) and enter `localhost:8081` in your browser:

```bash
kubectl port-forward svc/postgres-operator-ui 8081:80
```

Available options are explained in detail in the [UI docs](operator-ui.md).


@ -65,6 +65,20 @@ These parameters are grouped directly under the `spec` key in the manifest.
custom Docker image that overrides the **docker_image** operator parameter.
It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.
* **schedulerName**
specifies the scheduling profile for database pods. If no value is provided
K8s' `default-scheduler` will be used. Optional.
* **spiloRunAsUser**
sets the user ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.
* **spiloRunAsGroup**
sets the group ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.
* **spiloFSGroup**
the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
writable by the group ID specified. This will override the **spilo_fsgroup**
@ -141,10 +155,15 @@ These parameters are grouped directly under the `spec` key in the manifest.
configured (so you can override the operator configuration). Optional.
* **enableConnectionPooler**
Tells the operator to create a connection pooler with a database for the master
service. If this field is true, a connection pooler deployment will be created
even if `connectionPooler` section is empty. Optional, not set by default.
* **enableReplicaConnectionPooler**
Tells the operator to create a connection pooler with a database for the replica
service. If this field is true, a connection pooler deployment for the replica
will be created even if `connectionPooler` section is empty. Optional, not set
by default. See the combined sketch after this list.
* **enableLogicalBackup**
Determines if the logical backup of this cluster should be taken and uploaded
to S3. Default: false. Optional.
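A minimal manifest fragment enabling poolers for both the master and the
replica service might look like this (a sketch; see the `connectionPooler`
section below for fine-tuning):

```yaml
spec:
  enableConnectionPooler: true
  enableReplicaConnectionPooler: true
```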
@ -231,10 +250,10 @@ explanation of `ttl` and `loop_wait` parameters.
* **synchronous_mode**
Patroni `synchronous_mode` parameter value. The default is set to `false`. Optional.
* **synchronous_mode_strict**
Patroni `synchronous_mode_strict` parameter value. Can be used in addition to `synchronous_mode`. The default is set to `false`. Optional.
## Postgres container resources

Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
@ -387,8 +406,10 @@ CPU and memory limits for the sidecar container.
Parameters are grouped under the `connectionPooler` top-level key and specify
configuration for connection pooler. If this section is not empty, a connection
pooler will be created for the master service only, even if `enableConnectionPooler`
is not present. But if this section is present, it defines the configuration
for both master and replica pooler services (if `enableReplicaConnectionPooler`
is enabled).
* **numberOfInstances**
How many instances of connection pooler to create.


@ -56,3 +56,7 @@ The following environment variables are accepted by the operator:
* **CRD_READY_WAIT_INTERVAL**
defines the interval between consecutive attempts waiting for the
`postgresql` CRD to be created. The default is 5s.
* **ENABLE_JSON_LOGGING**
Set to `true` for JSON formatted logging output.
The default is false.
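For example, the variable could be set on the operator's Deployment like so (a
sketch; the surrounding deployment layout is assumed):

```yaml
env:
  - name: ENABLE_JSON_LOGGING
    value: "true"
```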


@ -76,9 +76,15 @@ Those are top-level keys, containing both leaf keys and groups.
The default is `true`.
* **enable_lazy_spilo_upgrade**
Instruct the operator to update only the statefulsets with new images (Spilo and InitContainers) without immediately doing the rolling update. The assumption is pods will be re-started later with new images, for example due to the node rotation.
The default is `false`.
* **enable_pgversion_env_var**
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
* **enable_spilo_wal_path_compat**
enables backwards compatible path between Spilo 12 and Spilo 13 images. The default is `false`.
* **etcd_host**
Etcd connection string for Patroni defined as `host:port`. Not required when
Patroni native Kubernetes support is used. The default is empty (use
@ -200,6 +206,16 @@ configuration they are grouped under the `kubernetes` key.
of a database created by the operator. If the annotation key is also provided
by the database definition, the database definition value is used.
* **delete_annotation_date_key**
key name for annotation that compares manifest value with current date in the
YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
The default is empty which also disables this delete protection check.
* **delete_annotation_name_key**
key name for annotation that compares manifest value with Postgres cluster name.
Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is
empty which also disables this delete protection check.
* **downscaler_annotations**
An array of annotations that should be passed from Postgres CRD on to the
statefulset and, if exists, to the connection pooler deployment as well.
@ -252,8 +268,20 @@ configuration they are grouped under the `kubernetes` key.
teams API. The default is `postgresql-operator`.
* **infrastructure_roles_secret_name**
*deprecated*: namespaced name of the secret containing infrastructure roles
with user names, passwords and role membership.
* **infrastructure_roles_secrets**
array of infrastructure role definitions which reference existing secrets
and specify the key names from which user name, password and role membership
are extracted. For the ConfigMap this has to be a string which allows
referencing only one infrastructure roles secret. The default is empty.
* **inherited_annotations**
list of annotation keys that can be inherited from the cluster manifest, and
added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PDB` and
`Services`) created by the operator incl. the ones from the connection
pooler deployment. The default is empty.
* **pod_role_label**
name of the label assigned to the Postgres pods (and services/endpoints) by
@ -264,15 +292,16 @@ configuration they are grouped under the `kubernetes` key.
objects. The default is `application:spilo`.
* **inherited_labels**
list of label keys that can be inherited from the cluster manifest, and
added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PVCs`,
`PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator.
Typical use case is to dynamically pass labels that are specific to a
given Postgres cluster, in order to implement `NetworkPolicy`. The default
is empty.
* **cluster_name_label**
name of the label assigned to Kubernetes objects created by the operator
that indicates which cluster a given object belongs to. The default is
`cluster-name`.
* **node_readiness_label**
@ -301,6 +330,16 @@ configuration they are grouped under the `kubernetes` key.
that should be assigned to the Postgres pods. The priority class itself must
be defined in advance. Default is empty (use the default priority class).
* **spilo_runasuser**
sets the user ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.
* **spilo_runasgroup**
sets the group ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.
* **spilo_fsgroup**
the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
writable by the group ID specified. This is required to run Spilo as a
@ -333,6 +372,12 @@ configuration they are grouped under the `kubernetes` key.
of stateful sets of PG clusters. The default is `ordered_ready`, the second
possible value is `parallel`.
* **storage_resize_mode**
defines how the operator handles the difference between the requested volume
size and the actual size. Available options are: ebs - tries to resize the EBS
volume, pvc - changes the PVC definition, off - disables resize of the
volumes. Default is "pvc". When using OpenShift please use one of the other
available options.
## Kubernetes resource requests

This group allows you to configure resource requests for the Postgres pods.
@ -402,6 +447,12 @@ CRD-based configuration.
Those options affect the behavior of load balancers created by the operator.
In the CRD-based configuration they are grouped under the `load_balancer` key.
* **custom_service_annotations**
This key/value map provides a list of annotations that get attached to each
service of a cluster created by the operator. If the annotation key is also
provided by the cluster definition, the manifest value is used.
Optional.
* **db_hosted_zone**
DNS zone for the cluster DNS name when the load balancer is configured for
the cluster. Only used when combined with
@ -418,11 +469,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
cluster. Can be overridden by individual cluster settings. The default is
`false`.
* **external_traffic_policy** defines external traffic policy for load
balancers. Allowed values are `Cluster` (default) and `Local`.
* **master_dns_name_format** defines the DNS name string template for the
master load balancer cluster. The default is
@ -451,6 +499,20 @@ yet officially supported.
present and accessible by Postgres pods. At the moment, supported services by
Spilo are S3 and GCS. The default is empty.
* **wal_gs_bucket**
GCS bucket to use for shipping WAL segments with WAL-E. A bucket has to be
present and accessible by Postgres pods. Note, only the name of the bucket is
required. At the moment, supported services by Spilo are S3 and GCS.
The default is empty.
* **gcp_credentials**
Used to set the GOOGLE_APPLICATION_CREDENTIALS environment variable for the pods.
This is used in conjunction with the `additional_secret_mount` and
`additional_secret_mount_path` to properly set the credentials for the Spilo
containers. This will allow users to use specific
[service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform).
The default is empty.
* **log_s3_bucket**
S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS.
The bucket has to be present and accessible by Postgres pods. The default is
@ -466,10 +528,22 @@ yet officially supported.
AWS region used to store EBS volumes. The default is `eu-central-1`.
* **additional_secret_mount**
Additional Secret (aws or gcp credentials) to mount in the pod.
The default is empty.
* **additional_secret_mount_path**
Path to mount the above Secret in the filesystem of the container(s).
The default is empty.
* **enable_ebs_gp3_migration**
enable automatic migration on AWS from gp2 to gp3 volumes that are smaller
than the configured max size (see below). This ignores that EBS gp3 is by
default only 125 MB/sec vs 250 MB/sec for gp2 >= 333GB.
The default is `false`.
* **enable_ebs_gp3_migration_max_size**
defines the maximum volume size in GB until which auto migration happens.
Default is 1000 (1TB) which matches 3000 IOPS.
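As a sketch, enabling the migration only for volumes up to 500 GB could look
like this in the ConfigMap-style configuration (values assumed):

```yaml
# ConfigMap data entries; both options are strings
enable_ebs_gp3_migration: "true"
enable_ebs_gp3_migration_max_size: "500"
```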
## Logical backup
@ -489,6 +563,10 @@ grouped under the `logical_backup` key.
The default image is the same image built with the Zalando-internal CI
pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup"
* **logical_backup_provider**
Specifies the storage provider to which the backup should be uploaded (`s3` or `gcs`).
Default: "s3"
* **logical_backup_s3_bucket**
S3 bucket to store backup results. The bucket has to be present and
accessible by Postgres pods. Default: empty.
@ -509,6 +587,9 @@ grouped under the `logical_backup` key.
* **logical_backup_s3_secret_access_key**
When set, value will be in AWS_SECRET_ACCESS_KEY env variable. The default is empty.
* **logical_backup_google_application_credentials**
Specifies the path of the Google Cloud service account JSON file. Default is empty.
## Debugging the operator

Options to aid debugging of the operator itself. Grouped under the `debug` key.
@ -549,8 +630,8 @@ key.
The default is `"log_statement:all"`
* **enable_team_superuser**
whether to grant superuser to members of the cluster's owning team created
from the Teams API. The default is `false`.
* **team_admin_role**
role name to grant to team members created from the Teams API. The default is
@ -583,6 +664,16 @@ key.
cluster to administer Postgres and maintain infrastructure built around it.
The default is empty.
* **enable_postgres_team_crd**
toggle to make the operator watch for created or updated `PostgresTeam` CRDs
and create roles for specified additional teams and members.
The default is `false`.
* **enable_postgres_team_crd_superusers**
in a `PostgresTeam` CRD additional superuser teams can be assigned to teams
that own clusters. With this flag set to `false`, it will be ignored.
The default is `false`.
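In the CRD-based configuration these toggles would sit under the teams API
group, roughly like this (a sketch; the `teams_api` key grouping is assumed):

```yaml
configuration:
  teams_api:
    enable_postgres_team_crd: true
    enable_postgres_team_crd_superusers: false
```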
## Logging and REST API

Parameters affecting logging and REST API listener. In the CRD-based


@ -49,7 +49,7 @@ Note, that the name of the cluster must start with the `teamId` and `-`. At
Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster
names and colliding entities. The team ID would also be used to query an API to
get all members of a team and create [database roles](#teams-api-roles) for
them. Besides, the maximum cluster name length is 53 characters.
## Watch pods being created
@ -150,23 +150,62 @@ user. There are two ways to define them:
#### Infrastructure roles secret
Infrastructure roles can be specified by the `infrastructure_roles_secrets`
parameter where you can reference multiple existing secrets. Prior to `v1.6.0`
the operator could only reference one secret with the
`infrastructure_roles_secret_name` option. However, this secret could contain
multiple roles using the same set of keys plus an incrementing index.
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: postgresql-infrastructure-roles
data:
  user1: ZGJ1c2Vy
  password1: c2VjcmV0
  inrole1: b3BlcmF0b3I=
  user2: ...
```
The block above describes the infrastructure role 'dbuser' with password
'secret' that is a member of the 'operator' role. The resulting role will
automatically be a login role.
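The values are plain base64 encodings; assuming standard coreutils, they can
be produced or verified like this:

```bash
echo -n "dbuser" | base64    # ZGJ1c2Vy
echo -n "secret" | base64    # c2VjcmV0
echo -n "operator" | base64  # b3BlcmF0b3I=
```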
With the new option users can configure the names of secret keys that contain
the user name, password etc. The secret itself is referenced by the
`secretname` key. If the secret uses a template for multiple roles as described
above, list them separately.
```yaml
apiVersion: v1
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  kubernetes:
    infrastructure_roles_secrets:
    - secretname: "postgresql-infrastructure-roles"
      userkey: "user1"
      passwordkey: "password1"
      rolekey: "inrole1"
    - secretname: "postgresql-infrastructure-roles"
      userkey: "user2"
      ...
```
Note, only the CRD-based configuration allows for referencing multiple secrets.
As of now, the ConfigMap is restricted to either one secret in the new format
or the existing template option with `infrastructure_roles_secret_name`.
Please, refer to the example manifests to understand how
`infrastructure_roles_secrets` has to be configured for the
[configmap](../manifests/configmap.yaml) or [CRD configuration](../manifests/postgresql-operator-default-configuration.yaml).
If both `infrastructure_roles_secret_name` and `infrastructure_roles_secrets`
are defined, the operator will create roles for both of them. So make sure
they do not collide. Note also, that with definitions that solely use the
infrastructure roles secret there is no way to specify role options (like
superuser or nologin) or role memberships. This is where the additional
ConfigMap comes into play.
#### Secret plus ConfigMap
@ -230,6 +269,144 @@ to choose superusers, group roles, [PAM configuration](https://github.com/CyberD
etc. An OAuth2 token can be passed to the Teams API via a secret. The name for
this secret is configurable with the `oauth_token_secret_name` parameter.
### Additional teams and members per cluster
Postgres clusters are associated with one team by providing the `teamID` in
the manifest. Additional superuser teams can be configured as mentioned in
the previous paragraph. However, this is a global setting. To assign
additional teams, superuser teams and single users to clusters of a given
team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml).
Note, by default the `PostgresTeam` support is disabled in the configuration.
Switch the `enable_postgres_team_crd` flag to `true` and the operator will
start to watch for this CRD. Make sure the cluster role is up to date and
contains a section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30).
#### Additional teams
To assign additional teams and single users to clusters of a given team,
define a mapping with the `PostgresTeam` Kubernetes resource. The Postgres
Operator will read such team mappings each time it syncs all Postgres clusters.
```yaml
apiVersion: "acid.zalan.do/v1"
kind: PostgresTeam
metadata:
  name: custom-team-membership
spec:
  additionalTeams:
    a-team:
    - "b-team"
```
With the example above the operator will create login roles for all members
of `b-team` in every cluster owned by `a-team`. It is possible to do the
reverse for clusters of `b-team` in the same manifest:
```yaml
spec:
  additionalTeams:
    a-team:
    - "b-team"
    b-team:
    - "a-team"
```
As you can see, the `PostgresTeam` CRD is a global team mapping, independent
of the Postgres manifests. It is possible to define multiple mappings, even
with redundant content - the Postgres operator will create one internal cache
from them. Additional teams are resolved transitively, meaning you will also
add users for their `additionalTeams`, e.g.:
```yaml
spec:
  additionalTeams:
    a-team:
    - "b-team"
    - "c-team"
    b-team:
    - "a-team"
```
This creates roles for members of the `c-team` team not only in all clusters
owned by `a-team`, but also in clusters owned by `b-team`, as `a-team` is
an `additionalTeam` to `b-team`.
Note, you can also define `additionalSuperuserTeams` in the `PostgresTeam`
manifest. By default, this option is disabled and must be enabled with
`enable_postgres_team_crd_superusers` to make it work.
#### Virtual teams
There can be "virtual teams" that do not exist in the Teams API. It can make
it easier to map a group of teams to many other teams:
```yaml
spec:
  additionalTeams:
    a-team:
    - "virtual-team"
    b-team:
    - "virtual-team"
    virtual-team:
    - "c-team"
    - "d-team"
```
This example would create roles for members of `c-team` and `d-team` plus
additional `virtual-team` members in clusters owned by `a-team` or `b-team`.
#### Teams changing their names
With `PostgresTeams` it is also easy to cover team name changes. Just add
the mapping between the old and new team name and the rest can stay the same.
E.g. if team `a-team` is renamed to `f-team` in the Teams API, this can be
reflected in a `PostgresTeam` mapping with just two lines:
```yaml
spec:
  additionalTeams:
    a-team:
    - "f-team"
```
This is helpful, because Postgres cluster names are immutable and cannot be
changed. Only by cloning the cluster can it get a different name starting with
the new `teamID`.
#### Additional members
Single members might be excluded from teams although they continue to work
with the same people. However, the Teams API would not reflect this anymore.
To still add a database role for former team members, list their role under
the `additionalMembers` section of the `PostgresTeam` resource:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: PostgresTeam
metadata:
  name: custom-team-membership
spec:
  additionalMembers:
    a-team:
    - "tia"
```
This will create the login role `tia` in every cluster owned by `a-team`.
The user can connect to databases like the other team members.
The `additionalMembers` map can also be used to define users of virtual
teams, e.g. for the `virtual-team` we used above:
```yaml
spec:
  additionalMembers:
    virtual-team:
    - "flynch"
    - "rdecker"
    - "briggs"
```
## Prepared databases with roles and default privileges

The `users` section in the manifests only allows for creating database roles
@ -412,7 +589,7 @@ manifest the operator will raise the limits to the configured minimum values.
If no resources are defined in the manifest they will be obtained from the
configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests).
## Use taints, tolerations and node affinity for dedicated PostgreSQL nodes
To ensure Postgres pods are running on nodes without any other application pods,
you can use [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
@ -426,6 +603,28 @@ spec:
    effect: NoSchedule
```
If you need the pods to be scheduled on specific nodes you may use
[node affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/)
to specify a set of label(s), of which a prospective host node must have at
least one. This could be used to place Postgres pods on nodes with certain
hardware capabilities (e.g. SSD drives) or in certain environments or network
segments, e.g. for PCI compliance.
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "ACID"
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: environment
              operator: In
              values:
                - pci
```
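For the affinity above to match, prospective nodes must carry the referenced
label. As a sketch (node name is hypothetical), such a label could be applied
with:

```bash
kubectl label node worker-node-1 environment=pci
```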
## How to clone an existing PostgreSQL cluster

You can spin up a new cluster as a clone of the existing one, using a `clone`
@ -437,6 +636,10 @@ section in the spec. There are two options here:
Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade)
of PostgreSQL.
## In-place major version upgrade

Starting with Spilo 13, the operator supports in-place major version upgrades
to a higher major version (e.g. from PG 10 to PG 12). To trigger the upgrade,
simply increase the version in the manifest. It is your responsibility to test
your applications against the new version before the upgrade; downgrading is
not supported. The easiest way to do so is to try the upgrade on a cloned
cluster first. For details of how Spilo does the upgrade [see here](https://github.com/zalando/spilo/pull/488);
the operator implementation is described [in the admin docs](administrator.md#minor-and-major-version-upgrade).
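As a sketch, assuming a cluster manifest currently pinned to PG 10, the
upgrade is triggered by bumping the `version` field:

```yaml
spec:
  postgresql:
    version: "12"
```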
### Clone from S3

Cloning from S3 has the advantage that there is no impact on your production
@ -698,11 +901,17 @@ manifest:
```yaml
spec:
  enableConnectionPooler: true
  enableReplicaConnectionPooler: true
```

This will tell the operator to create a connection pooler with default
configuration, through which one can access the master via a separate service
`{cluster-name}-pooler`. With the first option, a connection pooler for the
master service is created; with the second option, one for the replica service.
Note that both of these flags are independent of each other and users can set
or unset either of them as per their requirements without any effect on the
other. In most of the cases the
[default configuration](reference/operator_parameters.md#connection-pooler-configuration)
should be good enough. To configure a new connection pooler individually for
each Postgres cluster, specify:
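A sketch of such an individual configuration, assuming the `connectionPooler`
section with field names from the connection pooler reference; the values here
are only placeholders:

```yaml
spec:
  enableConnectionPooler: true
  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"
    schema: "pooler"
    user: "pooler"
```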
View File
@ -1,8 +1,12 @@
# An image to run e2e tests.
# The image does not include the tests; all necessary files are bind-mounted when a container starts.
FROM ubuntu:20.04
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

ENV TERM xterm-256color

COPY requirements.txt ./
COPY scm-source.json ./

RUN apt-get update \
    && apt-get install --no-install-recommends -y \
@ -10,14 +14,15 @@ RUN apt-get update \
           python3-setuptools \
           python3-pip \
           curl \
           vim \
    && pip3 install --no-cache-dir -r requirements.txt \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \
    && chmod +x ./kubectl \
    && mv ./kubectl /usr/local/bin/kubectl \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# working line
# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests
ENTRYPOINT ["python3", "-m", "unittest"]
CMD ["discover","-v","--failfast","--start-directory","/tests"]
View File
@ -1,6 +1,6 @@
.PHONY: clean copy docker push tools test

BINARY ?= postgres-operator-e2e-tests-runner
BUILD_FLAGS ?= -v
CGO_ENABLED ?= 0
ifeq ($(RACE),1)
@ -34,15 +34,23 @@ copy: clean
	mkdir manifests
	cp ../manifests -r .

docker: scm-source.json
	docker build -t "$(IMAGE):$(TAG)" .

scm-source.json: ../.git
	echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json

push: docker
	docker push "$(IMAGE):$(TAG)"

tools:
	# install pinned version of 'kind'
	# go get must run outside of a dir with a (module-based) Go project !
	# otherwise go get updates project's dependencies and/or behaves differently
	cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0

e2etest: tools copy clean
	./run.sh main

cleanup: clean
	./run.sh cleanup
View File
@ -12,6 +12,10 @@ Docker.
Docker
Go
## Notice

The `manifests` folder in the e2e tests folder is not committed to git; it comes from `/manifests`.
## Build test runner

In the directory of the cloned Postgres Operator repository change to the e2e
@ -29,12 +33,78 @@ runtime.
In the e2e folder you can invoke tests either with `make test` or with:
```bash
./run.sh main
```
To run both the build and test step you can invoke `make e2e` from the parent
directory.
To run the end-to-end tests and keep the kind state, execute:
```bash
NOCLEANUP=True ./run.sh main
```
## Run individual test

After a normal E2E run with `NOCLEANUP=True`, kind continues to run, allowing subsequent test runs.

To run an individual test, run the following command in the `e2e` directory:
```bash
NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade
```
## Inspecting Kind
If you want to inspect the kind/Kubernetes cluster, switch the `kubeconfig` file and context:
```bash
# save the old config in case you have it
export KUBECONFIG_SAVED=$KUBECONFIG
# use the one created by e2e tests
export KUBECONFIG=/tmp/kind-config-postgres-operator-e2e-tests
# this kubeconfig defines a single context
kubectl config use-context kind-postgres-operator-e2e-tests
```
or use the following script to exec into the K8s setup and then use `kubectl`
```bash
./exec_into_env.sh
# use kubectl
kubectl get pods
# watch relevant objects
./scripts/watch_objects.sh
# get operator logs
./scripts/get_logs.sh
```
If you want to inspect the state of the `kind` cluster manually with a single command, add a `context` flag:
```bash
kubectl get pods --context kind-kind
```
or set the context for a few commands at once
## Cleaning up Kind
To clean up kind and start fresh:
```bash
e2e/run.sh cleanup
```
That also helps in case you see the following error:

```
ERROR: no nodes found for cluster "postgres-operator-e2e-tests"
```

which happens when the `kind` cluster was deleted manually but its
configuration file was not.
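In that case it should be enough to remove the stale kubeconfig manually
(path as defined in `run.sh`):

```bash
rm -f /tmp/kind-config-postgres-operator-e2e-tests
```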
## Covered use cases

The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py):
2 e2e/exec.sh Executable file
View File
@ -0,0 +1,2 @@
#!/usr/bin/env bash
kubectl exec -i "$1" -- sh -c "$2"
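For example, to list the Patroni members from inside a (hypothetically named)
Spilo pod:

```bash
./exec.sh acid-minimal-cluster-0 "patronictl list"
```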
14 e2e/exec_into_env.sh Executable file
View File
@ -0,0 +1,14 @@
#!/bin/bash
export cluster_name="postgres-operator-e2e-tests"
export kubeconfig_path="/tmp/kind-config-${cluster_name}"
export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest"
export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"
docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \
--mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
--mount type=bind,source="$(readlink -f manifests)",target=/manifests \
--mount type=bind,source="$(readlink -f tests)",target=/tests \
--mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
--mount type=bind,source="$(readlink -f scripts)",target=/scripts \
-e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}"
View File
@ -1,5 +1,5 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
View File
@ -1,3 +1,3 @@
kubernetes==11.0.0
timeout_decorator==0.4.1
pyyaml==5.3.1
View File
@ -6,71 +6,86 @@ set -o nounset
set -o pipefail
IFS=$'\n\t'

cd $(dirname "$0");

readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-13-e2e:0.3"
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"

export GOPATH=${GOPATH-~/go}
export PATH=${GOPATH}/bin:$PATH

echo "Clustername: ${cluster_name}"
echo "Kubeconfig path: ${kubeconfig_path}"

function pull_images(){
  operator_tag=$(git describe --tags --always --dirty)
  if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
  then
    docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
  fi
  operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
}

function start_kind(){
  echo "Starting kind for e2e tests"
  # avoid interference with previous test runs
  if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
  then
    kind delete cluster --name ${cluster_name}
  fi

  export KUBECONFIG="${kubeconfig_path}"
  kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
  docker pull "${spilo_image}"
  kind load docker-image "${spilo_image}" --name ${cluster_name}
}

function load_operator_image() {
  echo "Loading operator image"
  export KUBECONFIG="${kubeconfig_path}"
  kind load docker-image "${operator_image}" --name ${cluster_name}
}

function set_kind_api_server_ip(){
  echo "Setting up kind API server ip"
  # use the actual kubeconfig to connect to the 'kind' API server
  # but update the IP address of the API server to the one from the Docker 'bridge' network
  readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
  readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
  sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
}

function run_tests(){
  echo "Running tests... image: ${e2e_test_runner_image}"
  # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile

  docker run --rm --network=host -e "TERM=xterm-256color" \
  --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
  --mount type=bind,source="$(readlink -f manifests)",target=/manifests \
  --mount type=bind,source="$(readlink -f tests)",target=/tests \
  --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
  --mount type=bind,source="$(readlink -f scripts)",target=/scripts \
  -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@
}

function cleanup(){
  echo "Executing cleanup"
  unset KUBECONFIG
  kind delete cluster --name ${cluster_name}
  rm -rf ${kubeconfig_path}
}

function main(){
  echo "Entering main function..."
  [[ -z ${NOCLEANUP-} ]] && trap "cleanup" QUIT TERM EXIT
  pull_images
  [[ ! -f ${kubeconfig_path} ]] && start_kind
  load_operator_image
  set_kind_api_server_ip

  shift
  run_tests $@
  exit 0
}

"$1" $@
7 e2e/scripts/cleanup.sh Executable file
View File
@ -0,0 +1,7 @@
#!/bin/bash
kubectl delete postgresql acid-minimal-cluster
kubectl delete deployments -l application=db-connection-pooler,cluster-name=acid-minimal-cluster
kubectl delete statefulsets -l application=spilo,cluster-name=acid-minimal-cluster
kubectl delete services -l application=spilo,cluster-name=acid-minimal-cluster
kubectl delete configmap postgres-operator
kubectl delete deployment postgres-operator
2 e2e/scripts/get_logs.sh Executable file
View File
@ -0,0 +1,2 @@
#!/bin/bash
kubectl logs $(kubectl get pods -l name=postgres-operator --field-selector status.phase=Running -o jsonpath='{.items..metadata.name}')
33 e2e/scripts/watch_objects.sh Executable file
View File
@ -0,0 +1,33 @@
#!/bin/bash
watch -c "
kubectl get postgresql --all-namespaces
echo
echo -n 'Rolling upgrade pending: '
kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}'
echo
echo
echo 'Pods'
kubectl get pods -l application=spilo -o wide --all-namespaces
echo
kubectl get pods -l application=db-connection-pooler -o wide --all-namespaces
echo
echo 'Statefulsets'
kubectl get statefulsets --all-namespaces
echo
echo 'Deployments'
kubectl get deployments --all-namespaces -l application=db-connection-pooler
kubectl get deployments --all-namespaces -l application=postgres-operator
echo
echo
echo 'Step from operator deployment'
kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}'
echo
echo
echo 'Spilo Image in statefulset'
kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}'
echo
echo
echo 'Queue Status'
kubectl exec -it \$(kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.name}') -- curl localhost:8080/workers/all/status/
echo"
532 e2e/tests/k8s_api.py Normal file
View File
@ -0,0 +1,532 @@
import json
import time
import subprocess
import warnings
from kubernetes import client, config
from kubernetes.client.rest import ApiException
def to_selector(labels):
return ",".join(["=".join(lbl) for lbl in labels.items()])
class K8sApi:
def __init__(self):
# https://github.com/kubernetes-client/python/issues/309
warnings.simplefilter("ignore", ResourceWarning)
self.config = config.load_kube_config()
self.k8s_client = client.ApiClient()
self.core_v1 = client.CoreV1Api()
self.apps_v1 = client.AppsV1Api()
self.batch_v1_beta1 = client.BatchV1beta1Api()
self.custom_objects_api = client.CustomObjectsApi()
self.policy_v1_beta1 = client.PolicyV1beta1Api()
self.storage_v1_api = client.StorageV1Api()
class K8s:
'''
Wraps around K8s api client and helper methods.
'''
RETRY_TIMEOUT_SEC = 1
def __init__(self, labels='x=y', namespace='default'):
self.api = K8sApi()
self.labels = labels
self.namespace = namespace
def get_pg_nodes(self, pg_cluster_name, namespace='default'):
master_pod_node = ''
replica_pod_nodes = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master':
master_pod_node = pod.spec.node_name
elif pod.metadata.labels.get('spilo-role') == 'replica':
replica_pod_nodes.append(pod.spec.node_name)
return master_pod_node, replica_pod_nodes
def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'):
m = []
r = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running':
m.append(pod.spec.node_name)
elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running':
r.append(pod.spec.node_name)
return m, r
def wait_for_operator_pod_start(self):
self.wait_for_pod_start("name=postgres-operator")
# give operator time to subscribe to objects
time.sleep(1)
return True
def get_operator_pod(self):
pods = self.api.core_v1.list_namespaced_pod(
'default', label_selector='name=postgres-operator'
).items
pods = list(filter(lambda x: x.status.phase == 'Running', pods))
if len(pods):
return pods[0]
return None
def get_operator_log(self):
operator_pod = self.get_operator_pod()
pod_name = operator_pod.metadata.name
return self.api.core_v1.read_namespaced_pod_log(
name=pod_name,
namespace='default'
)
def pg_get_status(self, name="acid-minimal-cluster", namespace="default"):
pg = self.api.custom_objects_api.get_namespaced_custom_object(
"acid.zalan.do", "v1", namespace, "postgresqls", name)
return pg.get("status", {}).get("PostgresClusterStatus", None)
def wait_for_pod_start(self, pod_labels, namespace='default'):
pod_phase = 'No pod running'
while pod_phase != 'Running':
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items
if pods:
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_service_type(self, svc_labels, namespace='default'):
svc_type = ''
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
for svc in svcs:
svc_type = svc.spec.type
return svc_type
def check_service_annotations(self, svc_labels, annotations, namespace='default'):
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
for svc in svcs:
for key, value in annotations.items():
if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value:
print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations))
return False
return True
def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'):
ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items
for sset in ssets:
for key, value in annotations.items():
if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value:
print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations))
return False
return True
def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"):
body = {
"spec": {
"numberOfInstances": number_of_instances
}
}
self.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", namespace, "postgresqls", name, body)
def wait_for_running_pods(self, labels, number, namespace=''):
while self.count_pods_with_label(labels) != number:
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_pods_to_stop(self, labels, namespace=''):
while self.count_pods_with_label(labels) != 0:
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_service(self, labels, namespace='default'):
def get_services():
return self.api.core_v1.list_namespaced_service(
namespace, label_selector=labels
).items
while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
def count_services_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items)
def count_endpoints_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items)
def count_secrets_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items)
def count_statefulsets_with_label(self, labels, namespace='default'):
return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items)
def count_deployments_with_label(self, labels, namespace='default'):
return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items)
def count_pdbs_with_label(self, labels, namespace='default'):
return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
namespace, label_selector=labels).items)
def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
if pods:
new_pod_node = pods[0].spec.node_name
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_logical_backup_job(self, namespace='default'):
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
def wait_for_logical_backup_job(self, expected_num_of_jobs):
while (len(self.get_logical_backup_job().items) != expected_num_of_jobs):
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_logical_backup_job_deletion(self):
self.wait_for_logical_backup_job(expected_num_of_jobs=0)
def wait_for_logical_backup_job_creation(self):
self.wait_for_logical_backup_job(expected_num_of_jobs=1)
def delete_operator_pod(self, step="Delete operator pod"):
# patching the pod template in the deployment restarts the operator pod
self.api.apps_v1.patch_namespaced_deployment("postgres-operator", "default", {"spec": {"template": {"metadata": {"annotations": {"step": "{}-{}".format(step, time.time())}}}}})
self.wait_for_operator_pod_start()
def update_config(self, config_map_patch, step="Updating operator deployment"):
self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
self.delete_operator_pod(step=step)
def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"):
self.api.apps_v1.patch_namespaced_stateful_set(name, namespace, data)
def create_with_kubectl(self, path):
return subprocess.run(
["kubectl", "apply", "-f", path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def exec_with_kubectl(self, pod, cmd):
return subprocess.run(["./exec.sh", pod, cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
return []
return json.loads(r.stdout.decode())
def get_operator_state(self):
pod = self.get_operator_pod()
if pod is None:
return None
pod = pod.metadata.name
r = self.exec_with_kubectl(pod, "curl localhost:8080/workers/all/status/")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None
return json.loads(r.stdout.decode())
def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
result = self.get_patroni_state(pod)
return list(filter(lambda x: "State" in x and x["State"] == "running", result))
def get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"):
try:
deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace)
return deployment.spec.replicas
except ApiException:
return None
def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1)
if len(ssets.items) == 0:
return None
return ssets.items[0].spec.template.spec.containers[0].image
def get_effective_pod_image(self, pod_name, namespace='default'):
'''
Get the Spilo image pod currently uses. In case of lazy rolling updates
it may differ from the one specified in the stateful set.
'''
pod = self.api.core_v1.list_namespaced_pod(
namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name)
if len(pod.items) == 0:
return None
return pod.items[0].spec.containers[0].image
def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'):
labels = {
'application': 'spilo',
'cluster-name': pg_cluster_name,
'spilo-role': 'master',
}
pods = self.api.core_v1.list_namespaced_pod(
namespace, label_selector=to_selector(labels)).items
if pods:
return pods[0]
class K8sBase:
'''
K8s basic API wrapper class supposed to be inherited by other more specific classes for e2e tests
'''
RETRY_TIMEOUT_SEC = 1
def __init__(self, labels='x=y', namespace='default'):
self.api = K8sApi()
self.labels = labels
self.namespace = namespace
def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'):
master_pod_node = ''
replica_pod_nodes = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master':
master_pod_node = pod.spec.node_name
elif pod.metadata.labels.get('spilo-role') == 'replica':
replica_pod_nodes.append(pod.spec.node_name)
return master_pod_node, replica_pod_nodes
def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'):
m = []
r = []
podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running':
m.append(pod.spec.node_name)
elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running':
r.append(pod.spec.node_name)
return m, r
def wait_for_operator_pod_start(self):
self.wait_for_pod_start("name=postgres-operator")
def get_operator_pod(self):
pods = self.api.core_v1.list_namespaced_pod(
'default', label_selector='name=postgres-operator'
).items
if pods:
return pods[0]
return None
def get_operator_log(self):
operator_pod = self.get_operator_pod()
pod_name = operator_pod.metadata.name
return self.api.core_v1.read_namespaced_pod_log(
name=pod_name,
namespace='default'
)
def wait_for_pod_start(self, pod_labels, namespace='default'):
pod_phase = 'No pod running'
while pod_phase != 'Running':
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items
if pods:
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_service_type(self, svc_labels, namespace='default'):
svc_type = ''
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
for svc in svcs:
svc_type = svc.spec.type
return svc_type
def check_service_annotations(self, svc_labels, annotations, namespace='default'):
svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items
for svc in svcs:
for key, value in annotations.items():
if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value:
print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotations))
return False
return True
def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'):
ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items
for sset in ssets:
for key, value in annotations.items():
if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value:
print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotations))
return False
return True
def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"):
body = {
"spec": {
"numberOfInstances": number_of_instances
}
}
self.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", namespace, "postgresqls", name, body)
def wait_for_running_pods(self, labels, number, namespace=''):
while self.count_pods_with_label(labels) != number:
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_pods_to_stop(self, labels, namespace=''):
while self.count_pods_with_label(labels) != 0:
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_service(self, labels, namespace='default'):
def get_services():
return self.api.core_v1.list_namespaced_service(
namespace, label_selector=labels
).items
while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
def count_services_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items)
def count_endpoints_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items)
def count_secrets_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items)
def count_statefulsets_with_label(self, labels, namespace='default'):
return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items)
def count_deployments_with_label(self, labels, namespace='default'):
return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items)
def count_pdbs_with_label(self, labels, namespace='default'):
return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
namespace, label_selector=labels).items)
def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''
while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
if pods:
new_pod_node = pods[0].spec.node_name
pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_logical_backup_job(self, namespace='default'):
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
def wait_for_logical_backup_job(self, expected_num_of_jobs):
while (len(self.get_logical_backup_job().items) != expected_num_of_jobs):
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_logical_backup_job_deletion(self):
self.wait_for_logical_backup_job(expected_num_of_jobs=0)
def wait_for_logical_backup_job_creation(self):
self.wait_for_logical_backup_job(expected_num_of_jobs=1)
def delete_operator_pod(self, step="Delete operator deployment"):
# patching the pod template in the deployment restarts the operator pod
self.api.apps_v1.patch_namespaced_deployment("postgres-operator", "default", {"spec": {"template": {"metadata": {"annotations": {"step": "{}-{}".format(step, time.time())}}}}})
self.wait_for_operator_pod_start()
def update_config(self, config_map_patch, step="Updating operator deployment"):
self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
self.delete_operator_pod(step=step)
def create_with_kubectl(self, path):
return subprocess.run(
["kubectl", "apply", "-f", path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def exec_with_kubectl(self, pod, cmd):
return subprocess.run(["./exec.sh", pod, cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
return []
return json.loads(r.stdout.decode())
def get_patroni_running_members(self, pod):
result = self.get_patroni_state(pod)
return list(filter(lambda x: x["State"] == "running", result))
def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1)
if len(ssets.items) == 0:
return None
return ssets.items[0].spec.template.spec.containers[0].image
def get_effective_pod_image(self, pod_name, namespace='default'):
'''
Get the Spilo image pod currently uses. In case of lazy rolling updates
it may differ from the one specified in the stateful set.
'''
pod = self.api.core_v1.list_namespaced_pod(
namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name)
if len(pod.items) == 0:
return None
return pod.items[0].spec.containers[0].image
"""
Inspirational classes towards easier writing of end-to-end tests with one cluster per test case
"""
class K8sOperator(K8sBase):
def __init__(self, labels="name=postgres-operator", namespace="default"):
super().__init__(labels, namespace)
class K8sPostgres(K8sBase):
def __init__(self, labels="cluster-name=acid-minimal-cluster", namespace="default"):
super().__init__(labels, namespace)
def get_pg_nodes(self):
master_pod_node = ''
replica_pod_nodes = []
podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels)
for pod in podsList.items:
if pod.metadata.labels.get('spilo-role') == 'master':
master_pod_node = pod.spec.node_name
elif pod.metadata.labels.get('spilo-role') == 'replica':
replica_pod_nodes.append(pod.spec.node_name)
return master_pod_node, replica_pod_nodes
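A minimal usage sketch of the `K8s` helper as it might appear in a test case;
the label selector and cluster name are the defaults assumed throughout this
file, and the snippet itself is hypothetical, not part of the test suite:

```python
k8s = K8s()
k8s.wait_for_operator_pod_start()

# scale the default cluster and block until all pods report Running
k8s.scale_cluster(3)
k8s.wait_for_running_pods("application=spilo,cluster-name=acid-minimal-cluster", 3)

# inspect operator and Patroni state
print(k8s.get_operator_state())
print(k8s.get_patroni_running_members("acid-minimal-cluster-0"))
```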
File diff suppressed because it is too large

33 go.mod
View File
@ -1,23 +1,22 @@
module github.com/zalando/postgres-operator

go 1.15

require (
	github.com/aws/aws-sdk-go v1.36.3
	github.com/golang/mock v1.4.4
	github.com/lib/pq v1.9.0
	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
	github.com/r3labs/diff v1.1.0
	github.com/sirupsen/logrus v1.7.0
	github.com/stretchr/testify v1.6.1
	golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c
	golang.org/x/mod v0.4.0 // indirect
	golang.org/x/tools v0.0.0-20201207204333-a835c872fcea // indirect
	gopkg.in/yaml.v2 v2.4.0
	k8s.io/api v0.19.4
	k8s.io/apiextensions-apiserver v0.19.3
	k8s.io/apimachinery v0.19.4
	k8s.io/client-go v0.19.3
	k8s.io/code-generator v0.19.4
)
430 go.sum
View File
@ -1,17 +1,33 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
@ -21,41 +37,57 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.29.33 h1:WP85+WHalTFQR2wYp5xR2sjiVAZXew2bBQXGU1QJBXI= github.com/aws/aws-sdk-go v1.36.3 h1:KYpG5OegwW3xgOsMxy01nj/Td281yxi1Ha2lJQJs4tI=
github.com/aws/aws-sdk-go v1.29.33/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.36.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
@@ -63,31 +95,35 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w=
github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
@@ -105,9 +141,11 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@@ -118,6 +156,7 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
@@ -127,53 +166,67 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
@@ -181,41 +234,43 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8=
github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190620125010-da37f6c1e481/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -223,29 +278,32 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI=
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -255,90 +313,133 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc=
github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=
github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c h1:9HhBz5L/UjnK9XLtiZhYAdue5BVKep3PMmS2LuPDt8k=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -350,52 +451,74 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190621203818-d432491b9138/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -404,40 +527,83 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201207204333-a835c872fcea h1:LgKM3cNs8xO6GK1ZVK0nasPn7IN39Sz9EBTwQLyishk=
golang.org/x/tools v0.0.0-20201207204333-a835c872fcea/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@ -446,57 +612,51 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk=
k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
k8s.io/apiextensions-apiserver v0.19.3 h1:WZxBypSHW4SdXHbdPTS/Jy7L2la6Niggs8BuU5o+avo=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.19.3/go.mod h1:igVEkrE9TzInc1tYE7qSqxaLg/rEAp6B5+k9Q7+IC8Q=
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0=
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apiserver v0.19.3/go.mod h1:bx6dMm+H6ifgKFpCQT/SAhPwhzoeIMlHIaibomUDec0=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg=
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/code-generator v0.19.3/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
k8s.io/code-generator v0.19.4 h1:c8IL7RgTgJaYgr2bYMgjN0WikHnohbBhEgajfIkuP5I=
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
k8s.io/component-base v0.19.3/go.mod h1:WhLWSIefQn8W8jxSLl5WNiR6z8oyMe/8Zywg7alOkRc=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/kind v0.5.1 h1:BYnHEJ9DC+0Yjlyyehqd3xnKtEmFdLKU8QxqOqvQzdw=
sigs.k8s.io/kind v0.5.1/go.mod h1:L+Kcoo83/D1+ryU5P2VFbvYm0oqbkJn9zTZq0KNxW68=
sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296 h1:iQaIG5Dq+3qSiaFrJ/l/0MjjxKmdwyVNpKRYJwUe/+0=
sigs.k8s.io/kustomize/v3 v3.1.1-0.20190821175718-4b67a6de1296/go.mod h1:ztX4zYc/QIww3gSripwF7TBOarBTm5BvyAMem0kCzOE=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View File

@ -24,19 +24,20 @@ package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
postgresConstants "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"log"
)
// checkCmd represents kubectl pg check.
var checkCmd = &cobra.Command{
Use: "check",
Short: "Checks the Postgres operator is installed in the k8s cluster",
Long: `Checks that the Postgres CRD is registered in a k8s cluster.
This means that the operator pod was able to start normally.`,
Run: func(cmd *cobra.Command, args []string) {
check()
@ -47,9 +48,9 @@ kubectl pg check
}
// check validates whether the postgresql CRD is registered.
func check() *v1beta1.CustomResourceDefinition {
func check() *v1.CustomResourceDefinition {
config := getConfig()
apiExtClient, err := apiextbeta1.NewForConfig(config)
apiExtClient, err := apiextv1.NewForConfig(config)
if err != nil {
log.Fatal(err)
}

View File

@ -25,6 +25,13 @@ package cmd
import (
"flag"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -32,12 +39,6 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
const (
@ -88,7 +89,7 @@ func confirmAction(clusterName string, namespace string) {
}
clusterDetails := strings.Split(confirmClusterDetails, "/")
if clusterDetails[0] != namespace || clusterDetails[1] != clusterName {
fmt.Printf("cluster name or namespace doesn't match. Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName)
fmt.Printf("cluster name or namespace does not match. Please re-enter %s/%s\nHint: Press (ctrl+c) to exit\n", namespace, clusterName)
} else {
return
}

View File

@ -6,8 +6,10 @@ metadata:
# environment: demo
# annotations:
# "acid.zalan.do/controller": "second-operator"
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date" key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
teamId: "acid"
numberOfInstances: 2
users: # Application/Robot users
@ -16,7 +18,8 @@ spec:
- createdb
enableMasterLoadBalancer: false
enableReplicaLoadBalancer: false
# enableConnectionPooler: true # not needed when connectionPooler section is present (see below)
enableConnectionPooler: false # enable/disable connection pooler deployment
enableReplicaConnectionPooler: false # set to enable connectionPooler for replica service
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32
databases:
@ -33,8 +36,8 @@ spec:
defaultRoles: true
defaultUsers: false
postgresql:
version: "12"
version: "13"
parameters: # Expert section
shared_buffers: "32MB"
max_connections: "10"
log_statement: "all"
@ -66,6 +69,8 @@ spec:
# name: my-config-map
enableShmVolume: true
# spiloRunAsUser: 101
# spiloRunAsGroup: 103
# spiloFSGroup: 103
# podAnnotations:
# annotation.key: value
@ -88,9 +93,9 @@ spec:
encoding: "UTF8"
locale: "en_US.UTF-8"
data-checksums: "true"
pg_hba:
- hostssl all all 0.0.0.0/0 md5
- host all all 0.0.0.0/0 md5
# pg_hba:
# - hostssl all all 0.0.0.0/0 md5
# - host all all 0.0.0.0/0 md5
# slots:
# permanent_physical_1:
# type: physical
@ -122,18 +127,19 @@ spec:
# - 01:00-06:00 #UTC
# - Sat:00:00-04:00
connectionPooler: # overwrite custom properties for connection pooler deployments
numberOfInstances: 2
mode: "transaction"
schema: "pooler"
user: "pooler"
resources:
requests:
cpu: 300m
memory: 100Mi
limits:
cpu: "1"
memory: 100Mi
# connectionPooler:
# numberOfInstances: 2
# mode: "transaction"
# schema: "pooler"
# user: "pooler"
# resources:
# requests:
# cpu: 300m
# memory: 100Mi
# limits:
# cpu: "1"
# memory: 100Mi
initContainers:
- name: date
@ -166,3 +172,14 @@ spec:
# When TLS is enabled, also set spiloFSGroup parameter above to the relevant value.
# if unknown, set it to 103 which is the usual value in the default spilo images.
# In Openshift, there is no need to set spiloFSGroup/spilo_fsgroup.
# Add node affinity support by allowing postgres pods to schedule only on nodes that
# have label: "postgres-operator:enabled" set.
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: postgres-operator
# operator: In
# values:
# - enabled
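
The two delete-protection annotations near the top of this manifest only take effect when the operator configuration declares the matching keys; they appear (commented out) in the sample ConfigMap later in this commit. A minimal sketch of how the two sides pair up, reusing only names shown in this commit:

# operator ConfigMap (excerpt)
delete_annotation_date_key: "delete-date"
delete_annotation_name_key: "delete-clustername"

# cluster manifest (excerpt); deletion is then only honored on the annotated
# date and only when the annotated name equals the cluster's metadata.name
metadata:
  name: acid-test-cluster
  annotations:
    "delete-date": "2020-08-31"
    "delete-clustername": "acid-test-cluster"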

View File

@ -15,7 +15,7 @@ data:
# connection_pooler_default_cpu_request: "500m"
# connection_pooler_default_memory_limit: 100Mi
# connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-7"
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-12"
# connection_pooler_max_db_connections: 60
# connection_pooler_mode: "transaction"
# connection_pooler_number_of_instances: 2
@ -29,28 +29,40 @@ data:
# default_cpu_request: 100m
# default_memory_limit: 500Mi
# default_memory_request: 100Mi
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
# delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_validation: "true"
# enable_database_access: "true"
enable_ebs_gp3_migration: "false"
# enable_ebs_gp3_migration_max_size: "1000"
# enable_init_containers: "true"
# enable_lazy_spilo_upgrade: "false"
enable_master_load_balancer: "false"
enable_pgversion_env_var: "true"
# enable_pod_antiaffinity: "false"
# enable_pod_disruption_budget: "true"
# enable_postgres_team_crd: "false"
# enable_postgres_team_crd_superusers: "false"
enable_replica_load_balancer: "false"
# enable_shm_volume: "true"
# enable_sidecars: "true"
enable_spilo_wal_path_compat: "true"
# enable_team_superuser: "false"
enable_teams_api: "false"
# etcd_host: ""
external_traffic_policy: "Cluster"
# gcp_credentials: ""
# kubernetes_use_configmaps: "false"
# infrastructure_roles_secret_name: postgresql-infrastructure-roles
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
# inherited_annotations: owned-by
# inherited_labels: application,environment
# kube_iam_role: ""
# log_s3_bucket: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
# logical_backup_s3_access_key_id: ""
logical_backup_s3_bucket: "my-bucket-url"
# logical_backup_s3_region: ""
@ -73,8 +85,10 @@ data:
# pod_antiaffinity_topology_key: "kubernetes.io/hostname"
pod_deletion_wait_timeout: 10m
# pod_environment_configmap: "default/my-custom-config"
# pod_environment_secret: "my-custom-secret"
pod_label_wait_timeout: 10m
pod_management_policy: "ordered_ready"
# pod_priority_class_name: "postgres-pod-priority"
pod_role_label: spilo-role
# pod_service_account_definition: ""
pod_service_account_name: "postgres-pod"
@ -94,12 +108,17 @@ data:
secret_name_template: "{username}.{cluster}.credentials"
# sidecar_docker_images: ""
# set_memory_request_to_limit: "false"
# spilo_runasuser: 101
# spilo_runasgroup: 103
# spilo_fsgroup: 103
spilo_privileged: "false"
storage_resize_mode: "pvc"
super_username: postgres
# team_admin_role: "admin"
# team_api_role_configuration: "log_statement:all"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local
# toleration: ""
# wal_gs_bucket: ""
# wal_s3_bucket: ""
watched_namespace: "*" # listen to all namespaces
workers: "4"
workers: "16"

View File

@ -0,0 +1,13 @@
apiVersion: "acid.zalan.do/v1"
kind: PostgresTeam
metadata:
name: custom-team-membership
spec:
additionalSuperuserTeams:
acid:
- "postgres_superusers"
additionalTeams:
acid: []
additionalMembers:
acid:
- "elephant"

View File

@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
namespace: kube-system
name: standard
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/host-path

View File

@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: fake-teams-api

View File

@ -0,0 +1,12 @@
apiVersion: v1
data:
# infrastructure role definition in the new format
# robot_zmon_acid_monitoring_new
user: cm9ib3Rfem1vbl9hY2lkX21vbml0b3JpbmdfbmV3
# foobar_new
password: Zm9vYmFyX25ldw==
kind: Secret
metadata:
name: postgresql-infrastructure-roles-new
namespace: default
type: Opaque
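
To make the operator read this new-format secret, it would be referenced through the infrastructure_roles_secrets option shown in the sample ConfigMap; a sketch wired to the names used above:

# operator ConfigMap (excerpt)
infrastructure_roles_secrets: "secretname:postgresql-infrastructure-roles-new,userkey:user,passwordkey:password"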

View File

@ -7,12 +7,14 @@ data:
# provide other options in the configmap.
# robot_zmon_acid_monitoring
user1: cm9ib3Rfem1vbl9hY2lkX21vbml0b3Jpbmc=
# foobar
password1: Zm9vYmFy
# robot_zmon
inrole1: cm9ib3Rfem1vbg==
# testuser
user2: dGVzdHVzZXI=
# foobar
# testpassword
password2: Zm9vYmFy
password2: dGVzdHBhc3N3b3Jk
# user batman with the password justice
# look for other fields in the infrastructure roles configmap
batman: anVzdGljZQ==
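
For reference, the base64 values above decode to the role attributes named in the inline comments:

# user1 -> robot_zmon_acid_monitoring, password1 -> foobar, inrole1 -> robot_zmon
# user2 -> testuser, password2 -> testpassword, batman -> justice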

View File

@ -0,0 +1,35 @@
# will not run but is good enough for tests to fail
apiVersion: apps/v1
kind: Deployment
metadata:
name: acid-minimal-cluster-pooler
labels:
application: db-connection-pooler
connection-pooler: acid-minimal-cluster-pooler
spec:
replicas: 1
selector:
matchLabels:
application: db-connection-pooler
connection-pooler: acid-minimal-cluster-pooler
cluster-name: acid-minimal-cluster
template:
metadata:
labels:
application: db-connection-pooler
connection-pooler: acid-minimal-cluster-pooler
cluster-name: acid-minimal-cluster
spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/pgbouncer:master-12
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 250Mi
limits:
cpu: 500m
memory: 500Mi
env: []
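
A deployment of this shape is what the operator generates once connection pooling is switched on for a cluster; a minimal sketch of the manifest side, using only fields that appear in the complete manifest earlier in this commit:

spec:
  enableConnectionPooler: true
  # optional overrides; operator defaults apply otherwise:
  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"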

View File

@ -18,4 +18,4 @@ spec:
preparedDatabases:
bar: {}
postgresql:
version: "12"
version: "13"

View File

@ -26,6 +26,15 @@ rules:
- patch
- update
- watch
# operator only reads PostgresTeams
- apiGroups:
- acid.zalan.do
resources:
- postgresteams
verbs:
- get
- list
- watch
# to create or get/update CRDs when starting up
- apiGroups:
- apiextensions.k8s.io
@ -97,6 +106,8 @@ rules:
- delete
- get
- list
- patch
- update
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
- ""

View File

@ -1,4 +1,4 @@
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: operatorconfigurations.acid.zalan.do
@ -11,345 +11,441 @@ spec:
singular: operatorconfiguration
shortNames:
- opconfig
categories:
- all
scope: Namespaced
versions:
- name: v1
served: true
storage: true
subresources:
status: {}
additionalPrinterColumns:
- name: Image
type: string
description: Spilo image to be used for Pods
jsonPath: .configuration.docker_image
- name: Cluster-Label
type: string
description: Label for K8s resources created by operator
jsonPath: .configuration.kubernetes.cluster_name_label
- name: Service-Account
type: string
description: Name of service account to be used
jsonPath: .configuration.kubernetes.pod_service_account_name
- name: Min-Instances
type: integer
description: Minimum number of instances per Postgres cluster
jsonPath: .configuration.min_instances
- name: Age
type: date
jsonPath: .metadata.creationTimestamp
schema:
openAPIV3Schema:
type: object
required:
- kind
- apiVersion
- configuration
properties:
kind:
subresources:
status: {}
version: v1
validation:
openAPIV3Schema:
type: object
required:
- kind
- apiVersion
- configuration
properties:
kind:
type: string
enum:
- OperatorConfiguration
apiVersion:
type: string
enum:
- acid.zalan.do/v1
configuration:
type: object
properties:
docker_image:
type: string
enable_crd_validation:
type: boolean
enable_lazy_spilo_upgrade:
type: boolean
enable_shm_volume:
type: boolean
etcd_host:
type: string
kubernetes_use_configmaps:
type: boolean
max_instances:
type: integer
minimum: -1 # -1 = disabled
min_instances:
type: integer
minimum: -1 # -1 = disabled
resync_period:
type: string
repair_period:
type: string
set_memory_request_to_limit:
type: boolean
sidecar_docker_images:
type: object
additionalProperties:
type: string
sidecars:
type: array
nullable: true
items:
type: object
additionalProperties: true
workers:
type: integer
minimum: 1
users:
type: object
properties:
replication_username:
type: string
super_username:
type: string
kubernetes:
type: object
properties:
cluster_domain:
type: string
cluster_labels:
type: object
additionalProperties:
type: string
cluster_name_label:
type: string
custom_pod_annotations:
type: object
additionalProperties:
type: string
downscaler_annotations:
type: array
items:
type: string
enable_init_containers:
type: boolean
enable_pod_antiaffinity:
type: boolean
enable_pod_disruption_budget:
type: boolean
enable_sidecars:
type: boolean
infrastructure_roles_secret_name:
type: string
inherited_labels:
type: array
items:
type: string
master_pod_move_timeout:
type: string
node_readiness_label:
type: object
additionalProperties:
type: string
oauth_token_secret_name:
type: string
pdb_name_format:
type: string
pod_antiaffinity_topology_key:
type: string
pod_environment_configmap:
type: string
pod_management_policy:
type: string
enum:
- "ordered_ready"
- "parallel"
pod_priority_class_name:
type: string
pod_role_label:
type: string
pod_service_account_definition:
type: string
pod_service_account_name:
type: string
pod_service_account_role_binding_definition:
type: string
pod_terminate_grace_period:
type: string
secret_name_template:
type: string
spilo_fsgroup:
type: integer
spilo_privileged:
type: boolean
toleration:
type: object
additionalProperties:
type: string
watched_namespace:
type: string
postgres_pod_resources:
type: object
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
timeouts:
type: object
properties:
pod_label_wait_timeout:
type: string
pod_deletion_wait_timeout:
type: string
ready_wait_interval:
type: string
ready_wait_timeout:
type: string
resource_check_interval:
type: string
resource_check_timeout:
type: string
load_balancer:
type: object
properties:
custom_service_annotations:
type: object
additionalProperties:
type: string
db_hosted_zone:
type: string
enable_master_load_balancer:
type: boolean
enable_replica_load_balancer:
type: boolean
master_dns_name_format:
type: string
replica_dns_name_format:
type: string
aws_or_gcp:
type: object
properties:
additional_secret_mount:
type: string
additional_secret_mount_path:
type: string
aws_region:
type: string
kube_iam_role:
type: string
log_s3_bucket:
type: string
wal_s3_bucket:
type: string
logical_backup:
type: object
properties:
logical_backup_docker_image:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:
type: string
logical_backup_s3_endpoint:
type: string
logical_backup_s3_region:
type: string
logical_backup_s3_secret_access_key:
type: string
logical_backup_s3_sse:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
debug:
type: object
properties:
debug_logging:
type: boolean
enable_database_access:
type: boolean
teams_api:
type: object
properties:
enable_admin_role_for_users:
type: boolean
enable_team_superuser:
type: boolean
enable_teams_api:
type: boolean
pam_configuration:
type: string
pam_role_name:
type: string
postgres_superuser_teams:
type: array
items:
type: string
protected_role_names:
type: array
items:
type: string
team_admin_role:
type: string
team_api_role_configuration:
type: object
additionalProperties:
type: string
teams_api_url:
type: string
logging_rest_api:
type: object
properties:
api_port:
type: integer
cluster_history_entries:
type: integer
ring_log_lines:
type: integer
scalyr: # deprecated
type: object
properties:
scalyr_api_key:
type: string
scalyr_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_image:
type: string
scalyr_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_server_url:
type: string
connection_pooler:
type: object
properties:
connection_pooler_schema:
type: string
#default: "pooler"
connection_pooler_user:
type: string
#default: "pooler"
connection_pooler_image:
type: string
#default: "registry.opensource.zalan.do/acid/pgbouncer"
connection_pooler_max_db_connections:
type: integer
#default: 60
connection_pooler_mode:
type: string
enum:
- "session"
- "transaction"
#default: "transaction"
connection_pooler_number_of_instances:
type: integer
minimum: 2
#default: 2
connection_pooler_default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "1"
connection_pooler_default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "500m"
connection_pooler_default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
connection_pooler_default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
status:
type: object
additionalProperties:
type: string type: string
enum:
- OperatorConfiguration
apiVersion:
type: string
enum:
- acid.zalan.do/v1
configuration:
type: object
properties:
docker_image:
type: string
enable_crd_validation:
type: boolean
enable_lazy_spilo_upgrade:
type: boolean
enable_pgversion_env_var:
type: boolean
enable_shm_volume:
type: boolean
enable_spilo_wal_path_compat:
type: boolean
etcd_host:
type: string
kubernetes_use_configmaps:
type: boolean
max_instances:
type: integer
minimum: -1 # -1 = disabled
min_instances:
type: integer
minimum: -1 # -1 = disabled
resync_period:
type: string
repair_period:
type: string
set_memory_request_to_limit:
type: boolean
sidecar_docker_images:
type: object
additionalProperties:
type: string
sidecars:
type: array
nullable: true
items:
type: object
x-kubernetes-preserve-unknown-fields: true
workers:
type: integer
minimum: 1
users:
type: object
properties:
replication_username:
type: string
super_username:
type: string
kubernetes:
type: object
properties:
cluster_domain:
type: string
cluster_labels:
type: object
additionalProperties:
type: string
cluster_name_label:
type: string
custom_pod_annotations:
type: object
additionalProperties:
type: string
delete_annotation_date_key:
type: string
delete_annotation_name_key:
type: string
downscaler_annotations:
type: array
items:
type: string
enable_init_containers:
type: boolean
enable_pod_antiaffinity:
type: boolean
enable_pod_disruption_budget:
type: boolean
enable_sidecars:
type: boolean
infrastructure_roles_secret_name:
type: string
infrastructure_roles_secrets:
type: array
nullable: true
items:
type: object
required:
- secretname
- userkey
- passwordkey
properties:
secretname:
type: string
userkey:
type: string
passwordkey:
type: string
rolekey:
type: string
defaultuservalue:
type: string
defaultrolevalue:
type: string
details:
type: string
template:
type: boolean
inherited_annotations:
type: array
items:
type: string
inherited_labels:
type: array
items:
type: string
master_pod_move_timeout:
type: string
node_readiness_label:
type: object
additionalProperties:
type: string
oauth_token_secret_name:
type: string
pdb_name_format:
type: string
pod_antiaffinity_topology_key:
type: string
pod_environment_configmap:
type: string
pod_environment_secret:
type: string
pod_management_policy:
type: string
enum:
- "ordered_ready"
- "parallel"
pod_priority_class_name:
type: string
pod_role_label:
type: string
pod_service_account_definition:
type: string
pod_service_account_name:
type: string
pod_service_account_role_binding_definition:
type: string
pod_terminate_grace_period:
type: string
secret_name_template:
type: string
spilo_runasuser:
type: integer
spilo_runasgroup:
type: integer
spilo_fsgroup:
type: integer
spilo_privileged:
type: boolean
storage_resize_mode:
type: string
enum:
- "ebs"
- "pvc"
- "off"
toleration:
type: object
additionalProperties:
type: string
watched_namespace:
type: string
postgres_pod_resources:
type: object
properties:
default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
min_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
timeouts:
type: object
properties:
pod_label_wait_timeout:
type: string
pod_deletion_wait_timeout:
type: string
ready_wait_interval:
type: string
ready_wait_timeout:
type: string
resource_check_interval:
type: string
resource_check_timeout:
type: string
load_balancer:
type: object
properties:
custom_service_annotations:
type: object
additionalProperties:
type: string
db_hosted_zone:
type: string
enable_master_load_balancer:
type: boolean
enable_replica_load_balancer:
type: boolean
external_traffic_policy:
type: string
enum:
- "Cluster"
- "Local"
master_dns_name_format:
type: string
replica_dns_name_format:
type: string
aws_or_gcp:
type: object
properties:
additional_secret_mount:
type: string
additional_secret_mount_path:
type: string
aws_region:
type: string
enable_ebs_gp3_migration:
type: boolean
enable_ebs_gp3_migration_max_size:
type: integer
gcp_credentials:
type: string
kube_iam_role:
type: string
log_s3_bucket:
type: string
wal_gs_bucket:
type: string
wal_s3_bucket:
type: string
logical_backup:
type: object
properties:
logical_backup_docker_image:
type: string
logical_backup_google_application_credentials:
type: string
logical_backup_provider:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:
type: string
logical_backup_s3_endpoint:
type: string
logical_backup_s3_region:
type: string
logical_backup_s3_secret_access_key:
type: string
logical_backup_s3_sse:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
debug:
type: object
properties:
debug_logging:
type: boolean
enable_database_access:
type: boolean
teams_api:
type: object
properties:
enable_admin_role_for_users:
type: boolean
enable_postgres_team_crd:
type: boolean
enable_postgres_team_crd_superusers:
type: boolean
enable_team_superuser:
type: boolean
enable_teams_api:
type: boolean
pam_configuration:
type: string
pam_role_name:
type: string
postgres_superuser_teams:
type: array
items:
type: string
protected_role_names:
type: array
items:
type: string
team_admin_role:
type: string
team_api_role_configuration:
type: object
additionalProperties:
type: string
teams_api_url:
type: string
logging_rest_api:
type: object
properties:
api_port:
type: integer
cluster_history_entries:
type: integer
ring_log_lines:
type: integer
scalyr: # deprecated
type: object
properties:
scalyr_api_key:
type: string
scalyr_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
scalyr_image:
type: string
scalyr_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
scalyr_server_url:
type: string
connection_pooler:
type: object
properties:
connection_pooler_schema:
type: string
#default: "pooler"
connection_pooler_user:
type: string
#default: "pooler"
connection_pooler_image:
type: string
#default: "registry.opensource.zalan.do/acid/pgbouncer"
connection_pooler_max_db_connections:
type: integer
#default: 60
connection_pooler_mode:
type: string
enum:
- "session"
- "transaction"
#default: "transaction"
connection_pooler_number_of_instances:
type: integer
minimum: 2
#default: 2
connection_pooler_default_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "1"
connection_pooler_default_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
#default: "500m"
connection_pooler_default_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
connection_pooler_default_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
#default: "100Mi"
status:
type: object
additionalProperties:
type: string
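Under the v1 schema above only kind, apiVersion and configuration are required at the top level, so a minimal custom resource accepted by this CRD can be as small as the following sketch (the metadata name is made up; the image matches the default used elsewhere in this commit):

apiVersion: acid.zalan.do/v1
kind: OperatorConfiguration
metadata:
  name: minimal-operator-configuration  # illustrative name
configuration:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2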

View File

@ -2,8 +2,12 @@ apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-operator
  labels:
    application: postgres-operator
spec:
  replicas: 1
  strategy:
    type: "Recreate"
  selector:
    matchLabels:
      name: postgres-operator
@ -15,7 +19,7 @@ spec:
      serviceAccountName: postgres-operator
      containers:
      - name: postgres-operator
        image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:

View File

@ -0,0 +1,11 @@
apiVersion: scheduling.k8s.io/v1
description: 'This priority class must be used only for databases controlled by the
Postgres operator'
kind: PriorityClass
metadata:
labels:
application: postgres-operator
name: postgres-pod-priority
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000
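Pods only get this class if the operator is told to use it; a sketch of the corresponding operator configuration fragment, mirroring the commented default in the sample configuration below:

configuration:
  kubernetes:
    pod_priority_class_name: "postgres-pod-priority"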

View File

@ -3,10 +3,12 @@ kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
  # enable_crd_validation: true
  # enable_lazy_spilo_upgrade: false
  enable_pgversion_env_var: true
  # enable_shm_volume: true
  enable_spilo_wal_path_compat: false
  etcd_host: ""
  # kubernetes_use_configmaps: false
  max_instances: -1
@ -19,7 +21,7 @@ configuration:
  #   name: global-sidecar-1
  #   ports:
  #   - containerPort: 80
  workers: 8
  users:
    replication_username: standby
    super_username: postgres
@ -31,6 +33,8 @@ configuration:
    # custom_pod_annotations:
    #   keya: valuea
    #   keyb: valueb
    # delete_annotation_date_key: delete-date
    # delete_annotation_name_key: delete-clustername
    # downscaler_annotations:
    # - deployment-time
    # - downscaler/*
@ -39,6 +43,16 @@ configuration:
    enable_pod_disruption_budget: true
    enable_sidecars: true
    # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
    # infrastructure_roles_secrets:
    # - secretname: "monitoring-roles"
    #   userkey: "user"
    #   passwordkey: "password"
    #   rolekey: "inrole"
    # - secretname: "other-infrastructure-role"
    #   userkey: "other-user-key"
    #   passwordkey: "other-password-key"
    # inherited_annotations:
    # - owned-by
    # inherited_labels:
    # - application
    # - environment
@ -49,16 +63,20 @@ configuration:
    pdb_name_format: "postgres-{cluster}-pdb"
    pod_antiaffinity_topology_key: "kubernetes.io/hostname"
    # pod_environment_configmap: "default/my-custom-config"
    # pod_environment_secret: "my-custom-secret"
    pod_management_policy: "ordered_ready"
    # pod_priority_class_name: "postgres-pod-priority"
    pod_role_label: spilo-role
    # pod_service_account_definition: ""
    pod_service_account_name: postgres-pod
    # pod_service_account_role_binding_definition: ""
    pod_terminate_grace_period: 5m
    secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
    # spilo_runasuser: 101
    # spilo_runasgroup: 103
    # spilo_fsgroup: 103
    spilo_privileged: false
    storage_resize_mode: pvc
    # toleration: {}
    # watched_namespace: ""
  postgres_pod_resources:
@ -76,23 +94,28 @@ configuration:
    resource_check_interval: 3s
    resource_check_timeout: 10m
  load_balancer:
    # custom_service_annotations:
    #   keyx: valuex
    #   keyy: valuey
    # db_hosted_zone: ""
    enable_master_load_balancer: false
    enable_replica_load_balancer: false
    external_traffic_policy: "Cluster"
    master_dns_name_format: "{cluster}.{team}.{hostedzone}"
    replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
  aws_or_gcp:
    # additional_secret_mount: "some-secret-name"
    # additional_secret_mount_path: "/some/dir"
    aws_region: eu-central-1
    enable_ebs_gp3_migration: false
    # enable_ebs_gp3_migration_max_size: 1000
    # gcp_credentials: ""
    # kube_iam_role: ""
    # log_s3_bucket: ""
    # wal_gs_bucket: ""
    # wal_s3_bucket: ""
  logical_backup:
    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v.1.6.0"
    # logical_backup_s3_access_key_id: ""
    logical_backup_s3_bucket: "my-bucket-url"
    # logical_backup_s3_endpoint: ""
@ -105,6 +128,8 @@ configuration:
    enable_database_access: true
  teams_api:
    # enable_admin_role_for_users: true
    # enable_postgres_team_crd: false
    # enable_postgres_team_crd_superusers: false
    enable_team_superuser: false
    enable_teams_api: false
    # pam_configuration: ""
@ -126,7 +151,7 @@ configuration:
    connection_pooler_default_cpu_request: "500m"
    connection_pooler_default_memory_limit: 100Mi
    connection_pooler_default_memory_request: 100Mi
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
    # connection_pooler_max_db_connections: 60
    connection_pooler_mode: "transaction"
    connection_pooler_number_of_instances: 2

View File

@ -1,4 +1,4 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: postgresqls.acid.zalan.do
@ -11,437 +11,557 @@ spec:
    singular: postgresql
    shortNames:
    - pg
    categories:
    - all
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    subresources:
      status: {}
    additionalPrinterColumns:
    - name: Team
      type: string
      description: Team responsible for Postgres Cluster
      jsonPath: .spec.teamId
    - name: Version
      type: string
      description: PostgreSQL version
      jsonPath: .spec.postgresql.version
    - name: Pods
      type: integer
      description: Number of Pods per Postgres cluster
      jsonPath: .spec.numberOfInstances
    - name: Volume
      type: string
      description: Size of the bound volume
      jsonPath: .spec.volume.size
    - name: CPU-Request
      type: string
      description: Requested CPU for Postgres containers
      jsonPath: .spec.resources.requests.cpu
    - name: Memory-Request
      type: string
      description: Requested memory for Postgres containers
      jsonPath: .spec.resources.requests.memory
    - name: Age
      type: date
      jsonPath: .metadata.creationTimestamp
    - name: Status
      type: string
      description: Current sync status of postgresql resource
      jsonPath: .status.PostgresClusterStatus
    schema:
      openAPIV3Schema:
        type: object
        required:
          - kind
          - apiVersion
          - spec
        properties:
          kind:
            type: string
            enum:
              - postgresql
          apiVersion:
            type: string
            enum:
              - acid.zalan.do/v1
          spec:
            type: object
            required:
              - numberOfInstances
              - teamId
              - postgresql
              - volume
            properties:
              additionalVolumes:
                type: array
                items:
                  type: object
                  required:
                    - name
                    - mountPath
                    - volumeSource
                  properties:
                    name:
                      type: string
                    mountPath:
                      type: string
                    targetContainers:
                      type: array
                      nullable: true
                      items:
                        type: string
                    volumeSource:
                      type: object
                      x-kubernetes-preserve-unknown-fields: true
                    subPath:
                      type: string
              allowedSourceRanges:
                type: array
                nullable: true
                items:
                  type: string
                  pattern: '^(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\/(\d|[1-2]\d|3[0-2])$'
              clone:
                type: object
                required:
                  - cluster
                properties:
                  cluster:
                    type: string
                  s3_endpoint:
                    type: string
                  s3_access_key_id:
                    type: string
                  s3_secret_access_key:
                    type: string
                  s3_force_path_style:
                    type: boolean
                  s3_wal_path:
                    type: string
                  timestamp:
                    type: string
                    pattern: '^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.[0-9]+)?(([+-]([01][0-9]|2[0-3]):[0-5][0-9]))$'
                    # The regexp matches the date-time format (RFC 3339 Section 5.6) that specifies a timezone as an offset relative to UTC
                    # Example: 1996-12-19T16:39:57-08:00
                    # Note: this field requires a timezone
                  uid:
                    format: uuid
                    type: string
              connectionPooler:
                type: object
                properties:
                  dockerImage:
                    type: string
                  maxDBConnections:
                    type: integer
                  mode:
                    type: string
                    enum:
                      - "session"
                      - "transaction"
                  numberOfInstances:
                    type: integer
                    minimum: 2
                  resources:
                    type: object
                    required:
                      - requests
                      - limits
                    properties:
                      limits:
                        type: object
                        required:
                          - cpu
                          - memory
                        properties:
                          cpu:
                            type: string
                            pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                          memory:
                            type: string
                            pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                      requests:
                        type: object
                        required:
                          - cpu
                          - memory
                        properties:
                          cpu:
                            type: string
                            pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                          memory:
                            type: string
                            pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                  schema:
                    type: string
                  user:
                    type: string
              databases:
                type: object
                additionalProperties:
                  type: string
                # Note: usernames specified here as database owners must be declared in the users key of the spec key.
              dockerImage:
                type: string
              enableConnectionPooler:
                type: boolean
              enableReplicaConnectionPooler:
                type: boolean
              enableLogicalBackup:
                type: boolean
              enableMasterLoadBalancer:
                type: boolean
              enableReplicaLoadBalancer:
                type: boolean
              enableShmVolume:
                type: boolean
              init_containers: # deprecated
                type: array
                nullable: true
                items:
                  type: object
                  x-kubernetes-preserve-unknown-fields: true
              initContainers:
                type: array
                nullable: true
                items:
                  type: object
                  x-kubernetes-preserve-unknown-fields: true
              logicalBackupSchedule:
                type: string
                pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
              maintenanceWindows:
                type: array
                items:
                  type: string
                  pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
              numberOfInstances:
                type: integer
                minimum: 0
              patroni:
                type: object
                properties:
                  initdb:
                    type: object
                    additionalProperties:
                      type: string
                  loop_wait:
                    type: integer
                  maximum_lag_on_failover:
                    type: integer
                  pg_hba:
                    type: array
                    items:
                      type: string
                  retry_timeout:
                    type: integer
                  slots:
                    type: object
                    additionalProperties:
                      type: object
                      additionalProperties:
                        type: string
                  synchronous_mode:
                    type: boolean
                  synchronous_mode_strict:
                    type: boolean
                  ttl:
                    type: integer
              podAnnotations:
                type: object
                additionalProperties:
                  type: string
              pod_priority_class_name: # deprecated
                type: string
              podPriorityClassName:
                type: string
postgresql:
type: object
required:
- version
properties:
version:
type: string
enum:
- "9.3"
- "9.4"
- "9.5"
- "9.6"
- "10"
- "11"
- "12"
- "13"
parameters:
type: object
additionalProperties:
type: string
preparedDatabases:
type: object
additionalProperties:
type: object
properties:
defaultUsers:
type: boolean
extensions:
type: object
additionalProperties:
type: string
schemas:
type: object
additionalProperties:
type: object
properties:
defaultUsers:
type: boolean
defaultRoles:
type: boolean
replicaLoadBalancer: # deprecated
type: boolean
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
# Decimal natural followed by m, or decimal natural followed by
# dot followed by up to three decimal digits.
#
# This is because the Kubernetes CPU resource has millis as the
# maximum precision. The actual values are checked in code
# because the regular expression would be huge and horrible and
# not very helpful in validation error messages; this one checks
# only the format of the given number.
#
# https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
# Note: the value specified here must not be zero or be lower
# than the corresponding request.
memory:
type: string
# You can express memory as a plain integer or as a fixed-point
# integer using one of these suffixes: E, P, T, G, M, k. You can
# also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki
#
# https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
# Note: the value specified here must not be zero or be higher
# than the corresponding limit.
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
memory:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
schedulerName:
type: string
serviceAnnotations:
type: object
additionalProperties:
type: string
sidecars:
type: array
nullable: true
items:
type: object
x-kubernetes-preserve-unknown-fields: true
spiloRunAsUser:
type: integer
spiloRunAsGroup:
type: integer
spiloFSGroup:
type: integer
standby:
type: object
required:
- s3_wal_path
properties:
s3_wal_path:
type: string
teamId:
type: string
tls:
type: object
required:
- secretName
properties:
secretName:
type: string
certificateFile:
type: string
privateKeyFile:
type: string
caFile:
type: string
caSecretName:
type: string
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
tolerations:
type: array
items:
type: object
required:
- key
- operator
- effect
properties:
key:
type: string
operator:
type: string
enum:
- Equal
- Exists
value:
type: string
effect:
type: string
enum:
- NoExecute
- NoSchedule
- PreferNoSchedule
tolerationSeconds:
type: integer
useLoadBalancer: # deprecated
type: boolean
users:
type: object
additionalProperties:
type: array
nullable: true
description: "Role flags specified here must not contradict each other"
items:
type: string
enum:
- bypassrls
- BYPASSRLS
- nobypassrls
- NOBYPASSRLS
- createdb
- CREATEDB
- nocreatedb
- NOCREATEDB
- createrole
- CREATEROLE
- nocreaterole
- NOCREATEROLE
- inherit
- INHERIT
- noinherit
- NOINHERIT
- login
- LOGIN
- nologin
- NOLOGIN
- replication
- REPLICATION
- noreplication
- NOREPLICATION
- superuser
- SUPERUSER
- nosuperuser
- NOSUPERUSER
volume:
type: object
required:
- size
properties:
size:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
# Note: the value specified here must not be zero.
storageClass:
type: string
subPath:
type: string
status:
type: object
additionalProperties:
type: string
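To illustrate the reworked schema: volume is now required next to numberOfInstances, teamId and postgresql, and fields such as enableReplicaConnectionPooler and schedulerName are new. A minimal manifest exercising them might look like this sketch (cluster and team names are only examples):

apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "13"
  enableReplicaConnectionPooler: true  # new optional field
  schedulerName: default-scheduler     # new optional field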

View File

@ -0,0 +1,68 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: postgresteams.acid.zalan.do
spec:
group: acid.zalan.do
names:
kind: PostgresTeam
listKind: PostgresTeamList
plural: postgresteams
singular: postgresteam
shortNames:
- pgteam
categories:
- all
scope: Namespaced
versions:
- name: v1
served: true
storage: true
subresources:
status: {}
schema:
openAPIV3Schema:
type: object
required:
- kind
- apiVersion
- spec
properties:
kind:
type: string
enum:
- PostgresTeam
apiVersion:
type: string
enum:
- acid.zalan.do/v1
spec:
type: object
properties:
additionalSuperuserTeams:
type: object
description: "Map for teamId and associated additional superuser teams"
additionalProperties:
type: array
nullable: true
description: "List of teams to become Postgres superusers"
items:
type: string
additionalTeams:
type: object
description: "Map for teamId and associated additional teams"
additionalProperties:
type: array
nullable: true
description: "List of teams whose members will also be added to the Postgres cluster"
items:
type: string
additionalMembers:
type: object
description: "Map for teamId and associated additional users"
additionalProperties:
type: array
nullable: true
description: "List of users who will also be added to the Postgres cluster"
items:
type: string
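A PostgresTeam resource satisfying this schema could look like the following sketch; all team and user names are invented:

apiVersion: acid.zalan.do/v1
kind: PostgresTeam
metadata:
  name: custom-team-membership  # illustrative name
spec:
  additionalSuperuserTeams:
    acid:
    - "postgres_superusers"
  additionalTeams:
    acid:
    - "24x7"
  additionalMembers:
    "24x7":
    - "batman"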

View File

@ -9,7 +9,7 @@ spec:
    size: 1Gi
  numberOfInstances: 1
  postgresql:
    version: "13"
  # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
  standby:
    s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"

View File

@ -13,4 +13,3 @@ nav:
  - Config parameters: 'reference/operator_parameters.md'
  - Manifest parameters: 'reference/cluster_manifest.md'
  - CLI options and environment: 'reference/command_line_and_environment.md'
- Google Summer of Code 2019: 'gsoc-2019/ideas.md'

1
mocks/mocks.go Normal file
View File

@ -0,0 +1 @@
package mocks

File diff suppressed because it is too large

View File

@ -102,7 +102,7 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {
	}
	tmp.Error = err.Error()
	tmp.Status.PostgresClusterStatus = ClusterStatusInvalid

	*p = Postgresql(tmp)
@ -113,9 +113,10 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {
	if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
		tmp2.Error = err.Error()
		tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}
	} else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil {
		tmp2.Error = err.Error()
		tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
	} else {
		tmp2.Spec.ClusterName = clusterName
	}
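Seen from the manifest side, this path marks a resource invalid instead of dropping it. Assuming the usual rule that extractClusterName enforces a teamId prefix on the resource name, a sketch like the following would be unmarshalled with PostgresClusterStatus set to Invalid and the error recorded:

# illustrative manifest: the name lacks the "acid-" teamId prefix
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: standalone-cluster
spec:
  teamId: "acid"
  numberOfInstances: 1
  volume:
    size: 1Gi
  postgresql:
    version: "13"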

View File

@ -45,30 +45,38 @@ type PostgresUsersConfiguration struct {
type KubernetesMetaConfiguration struct {
	PodServiceAccountName string `json:"pod_service_account_name,omitempty"`
	// TODO: change it to the proper json
	PodServiceAccountDefinition            string                       `json:"pod_service_account_definition,omitempty"`
	PodServiceAccountRoleBindingDefinition string                       `json:"pod_service_account_role_binding_definition,omitempty"`
	PodTerminateGracePeriod                Duration                     `json:"pod_terminate_grace_period,omitempty"`
	SpiloPrivileged                        bool                         `json:"spilo_privileged,omitempty"`
	SpiloRunAsUser                         *int64                       `json:"spilo_runasuser,omitempty"`
	SpiloRunAsGroup                        *int64                       `json:"spilo_runasgroup,omitempty"`
	SpiloFSGroup                           *int64                       `json:"spilo_fsgroup,omitempty"`
	WatchedNamespace                       string                       `json:"watched_namespace,omitempty"`
	PDBNameFormat                          config.StringTemplate        `json:"pdb_name_format,omitempty"`
	EnablePodDisruptionBudget              *bool                        `json:"enable_pod_disruption_budget,omitempty"`
	StorageResizeMode                      string                       `json:"storage_resize_mode,omitempty"`
	EnableInitContainers                   *bool                        `json:"enable_init_containers,omitempty"`
	EnableSidecars                         *bool                        `json:"enable_sidecars,omitempty"`
	SecretNameTemplate                     config.StringTemplate        `json:"secret_name_template,omitempty"`
	ClusterDomain                          string                       `json:"cluster_domain,omitempty"`
	OAuthTokenSecretName                   spec.NamespacedName          `json:"oauth_token_secret_name,omitempty"`
	InfrastructureRolesSecretName          spec.NamespacedName          `json:"infrastructure_roles_secret_name,omitempty"`
	InfrastructureRolesDefs                []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"`
	PodRoleLabel                           string                       `json:"pod_role_label,omitempty"`
	ClusterLabels                          map[string]string            `json:"cluster_labels,omitempty"`
	InheritedLabels                        []string                     `json:"inherited_labels,omitempty"`
	InheritedAnnotations                   []string                     `json:"inherited_annotations,omitempty"`
	DownscalerAnnotations                  []string                     `json:"downscaler_annotations,omitempty"`
	ClusterNameLabel                       string                       `json:"cluster_name_label,omitempty"`
	DeleteAnnotationDateKey                string                       `json:"delete_annotation_date_key,omitempty"`
	DeleteAnnotationNameKey                string                       `json:"delete_annotation_name_key,omitempty"`
	NodeReadinessLabel                     map[string]string            `json:"node_readiness_label,omitempty"`
	CustomPodAnnotations                   map[string]string            `json:"custom_pod_annotations,omitempty"`
	// TODO: use a proper toleration structure?
	PodToleration           map[string]string   `json:"toleration,omitempty"`
	PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"`
	PodEnvironmentSecret    string              `json:"pod_environment_secret,omitempty"`
	PodPriorityClassName    string              `json:"pod_priority_class_name,omitempty"`
	MasterPodMoveTimeout    Duration            `json:"master_pod_move_timeout,omitempty"`
	EnablePodAntiAffinity   bool                `json:"enable_pod_antiaffinity,omitempty"`
@ -104,17 +112,22 @@ type LoadBalancerConfiguration struct {
	CustomServiceAnnotations map[string]string     `json:"custom_service_annotations,omitempty"`
	MasterDNSNameFormat      config.StringTemplate `json:"master_dns_name_format,omitempty"`
	ReplicaDNSNameFormat     config.StringTemplate `json:"replica_dns_name_format,omitempty"`
	ExternalTrafficPolicy    string                `json:"external_traffic_policy" default:"Cluster"`
}

// AWSGCPConfiguration defines the configuration for AWS
// TODO complete Google Cloud Platform (GCP) configuration
type AWSGCPConfiguration struct {
	WALES3Bucket                 string `json:"wal_s3_bucket,omitempty"`
	AWSRegion                    string `json:"aws_region,omitempty"`
	WALGSBucket                  string `json:"wal_gs_bucket,omitempty"`
	GCPCredentials               string `json:"gcp_credentials,omitempty"`
	LogS3Bucket                  string `json:"log_s3_bucket,omitempty"`
	KubeIAMRole                  string `json:"kube_iam_role,omitempty"`
	AdditionalSecretMount        string `json:"additional_secret_mount,omitempty"`
	AdditionalSecretMountPath    string `json:"additional_secret_mount_path" default:"/meta/credentials"`
	EnableEBSGp3Migration        bool   `json:"enable_ebs_gp3_migration" default:"false"`
	EnableEBSGp3MigrationMaxSize int64  `json:"enable_ebs_gp3_migration_max_size" default:"1000"`
}

// OperatorDebugConfiguration defines options for the debug mode
@ -125,16 +138,18 @@ type OperatorDebugConfiguration struct {

// TeamsAPIConfiguration defines the configuration of TeamsAPI
type TeamsAPIConfiguration struct {
	EnableTeamsAPI                  bool              `json:"enable_teams_api,omitempty"`
	TeamsAPIUrl                     string            `json:"teams_api_url,omitempty"`
	TeamAPIRoleConfiguration        map[string]string `json:"team_api_role_configuration,omitempty"`
	EnableTeamSuperuser             bool              `json:"enable_team_superuser,omitempty"`
	EnableAdminRoleForUsers         bool              `json:"enable_admin_role_for_users,omitempty"`
	TeamAdminRole                   string            `json:"team_admin_role,omitempty"`
	PamRoleName                     string            `json:"pam_role_name,omitempty"`
	PamConfiguration                string            `json:"pam_configuration,omitempty"`
	ProtectedRoles                  []string          `json:"protected_role_names,omitempty"`
	PostgresSuperuserTeams          []string          `json:"postgres_superuser_teams,omitempty"`
	EnablePostgresTeamCRD           bool              `json:"enable_postgres_team_crd,omitempty"`
	EnablePostgresTeamCRDSuperusers bool              `json:"enable_postgres_team_crd_superusers,omitempty"`
}

// LoggingRESTAPIConfiguration defines Logging API conf
@ -155,7 +170,7 @@ type ScalyrConfiguration struct {
	ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
}

// ConnectionPoolerConfiguration defines default configuration for connection pooler
type ConnectionPoolerConfiguration struct {
	NumberOfInstances *int32 `json:"connection_pooler_number_of_instances,omitempty"`
	Schema            string `json:"connection_pooler_schema,omitempty"`
@ -171,32 +186,35 @@ type ConnectionPoolerConfiguration struct {

// OperatorLogicalBackupConfiguration defines configuration for logical backup
type OperatorLogicalBackupConfiguration struct {
	Schedule                     string `json:"logical_backup_schedule,omitempty"`
	DockerImage                  string `json:"logical_backup_docker_image,omitempty"`
	BackupProvider               string `json:"logical_backup_provider,omitempty"`
	S3Bucket                     string `json:"logical_backup_s3_bucket,omitempty"`
	S3Region                     string `json:"logical_backup_s3_region,omitempty"`
	S3Endpoint                   string `json:"logical_backup_s3_endpoint,omitempty"`
	S3AccessKeyID                string `json:"logical_backup_s3_access_key_id,omitempty"`
	S3SecretAccessKey            string `json:"logical_backup_s3_secret_access_key,omitempty"`
	S3SSE                        string `json:"logical_backup_s3_sse,omitempty"`
	GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"`
}

// OperatorConfigurationData defines the operation config
type OperatorConfigurationData struct {
	EnableCRDValidation        *bool                       `json:"enable_crd_validation,omitempty"`
	EnableLazySpiloUpgrade     bool                        `json:"enable_lazy_spilo_upgrade,omitempty"`
	EnablePgVersionEnvVar      bool                        `json:"enable_pgversion_env_var,omitempty"`
	EnableSpiloWalPathCompat   bool                        `json:"enable_spilo_wal_path_compat,omitempty"`
	EtcdHost                   string                      `json:"etcd_host,omitempty"`
	KubernetesUseConfigMaps    bool                        `json:"kubernetes_use_configmaps,omitempty"`
	DockerImage                string                      `json:"docker_image,omitempty"`
	Workers                    uint32                      `json:"workers,omitempty"`
	MinInstances               int32                       `json:"min_instances,omitempty"`
	MaxInstances               int32                       `json:"max_instances,omitempty"`
	ResyncPeriod               Duration                    `json:"resync_period,omitempty"`
	RepairPeriod               Duration                    `json:"repair_period,omitempty"`
	SetMemoryRequestToLimit    bool                        `json:"set_memory_request_to_limit,omitempty"`
	ShmVolume                  *bool                       `json:"enable_shm_volume,omitempty"`
	SidecarImages              map[string]string           `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
	SidecarContainers          []v1.Container              `json:"sidecars,omitempty"`
	PostgresUsersConfiguration PostgresUsersConfiguration  `json:"users"`
	Kubernetes                 KubernetesMetaConfiguration `json:"kubernetes"`

View File

@ -0,0 +1,33 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PostgresTeam defines Custom Resource Definition Object for team management.
type PostgresTeam struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PostgresTeamSpec `json:"spec"`
}
// PostgresTeamSpec defines the specification for the PostgresTeam TPR.
type PostgresTeamSpec struct {
AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"`
AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"`
AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PostgresTeamList defines a list of PostgresTeam definitions.
type PostgresTeamList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []PostgresTeam `json:"items"`
}

View File

@ -29,13 +29,16 @@ type PostgresSpec struct {
Patroni `json:"patroni,omitempty"`
Resources `json:"resources,omitempty"`
EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"`
EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"`
ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"`
TeamID string `json:"teamId"`
DockerImage string `json:"dockerImage,omitempty"`
SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"`
SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`
// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
// in that case the var evaluates to nil and the value is taken from the operator config
@ -51,12 +54,14 @@ type PostgresSpec struct {
AllowedSourceRanges []string `json:"allowedSourceRanges"`
NumberOfInstances int32 `json:"numberOfInstances"`
Users map[string]UserFlags `json:"users,omitempty"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`
Databases map[string]string `json:"databases,omitempty"`
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
SchedulerName *string `json:"schedulerName,omitempty"`
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
Sidecars []Sidecar `json:"sidecars,omitempty"`
InitContainers []v1.Container `json:"initContainers,omitempty"`
@ -64,10 +69,10 @@ type PostgresSpec struct {
ShmVolume *bool `json:"enableShmVolume,omitempty"`
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
// deprecated json tags
@ -109,14 +114,17 @@ type MaintenanceWindow struct {
// Volume describes a single volume in the manifest.
type Volume struct {
Size string `json:"size"`
StorageClass string `json:"storageClass,omitempty"`
SubPath string `json:"subPath,omitempty"`
Iops *int64 `json:"iops,omitempty"`
Throughput *int64 `json:"throughput,omitempty"`
}
// AdditionalVolume specs additional optional volumes for statefulset
type AdditionalVolume struct {
Name string `json:"name"`
MountPath string `json:"mountPath"`
SubPath string `json:"subPath,omitempty"`
TargetContainers []string `json:"targetContainers"`
VolumeSource v1.VolumeSource `json:"volumeSource"`
}
@ -124,7 +132,7 @@ type AdditionalVolume struct {
// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
type PostgresqlParam struct {
PgVersion string `json:"version"`
Parameters map[string]string `json:"parameters,omitempty"`
}
// ResourceDescription describes CPU and memory resources defined for a cluster.
@ -141,22 +149,23 @@ type Resources struct {
// Patroni contains Patroni-specific configuration
type Patroni struct {
InitDB map[string]string `json:"initdb,omitempty"`
PgHba []string `json:"pg_hba,omitempty"`
TTL uint32 `json:"ttl,omitempty"`
LoopWait uint32 `json:"loop_wait,omitempty"`
RetryTimeout uint32 `json:"retry_timeout,omitempty"`
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
Slots map[string]map[string]string `json:"slots,omitempty"`
SynchronousMode bool `json:"synchronous_mode,omitempty"`
SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"`
}
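A quick sketch of what the added omitempty tags change in practice, assuming the operator's apis package is importable: unset Patroni settings are now dropped from the serialized manifest instead of appearing as nulls and zeros.

package main

import (
    "encoding/json"
    "fmt"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
    p := acidv1.Patroni{TTL: 30, LoopWait: 10, RetryTimeout: 10}
    out, _ := json.Marshal(p)
    // Prints {"ttl":30,"loop_wait":10,"retry_timeout":10}; initdb, pg_hba,
    // slots and the synchronous_mode flags are omitted entirely.
    fmt.Println(string(out))
}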
// StandbyDescription contains s3 wal path
type StandbyDescription struct {
S3WalPath string `json:"s3_wal_path,omitempty"`
}
// TLSDescription specs TLS properties
type TLSDescription struct {
SecretName string `json:"secretName,omitempty"`
CertificateFile string `json:"certificateFile,omitempty"`
@ -194,7 +203,7 @@ type PostgresStatus struct {
PostgresClusterStatus string `json:"PostgresClusterStatus"`
}
// ConnectionPooler Options for connection pooler
//
// TODO: prepared snippets of configuration, one can choose via type, e.g.
// pgbouncer-large (with higher resources) or odyssey-small (with smaller

View File

@ -1,11 +1,10 @@
package v1
import (
acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// APIVersion of the `postgresql` and `operator` CRDs
@ -44,6 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
// TODO: Use uppercase CRDResourceKind of our types in the next major API version
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeam"), &PostgresTeam{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeamList"), &PostgresTeamList{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"),
&OperatorConfiguration{})
scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"),

View File

@ -72,7 +72,7 @@ func extractClusterName(clusterName string, teamName string) (string, error) {
func validateCloneClusterDescription(clone *CloneDescription) error {
// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" {
if !serviceNameRegex.MatchString(clone.ClusterName) {
return fmt.Errorf("clone cluster name must conform to DNS-1035, regex used for validation is %q",
serviceNameRegexString)
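Since Clone is now a pointer, a manifest without a clone section reaches this function as nil; a small in-package regression test sketch for the new guard:

package v1

import "testing"

func TestValidateNilCloneDescription(t *testing.T) {
    // A missing clone section must be treated as valid input.
    if err := validateCloneClusterDescription(nil); err != nil {
        t.Errorf("nil clone should pass validation: %v", err)
    }
}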

View File

@ -163,7 +163,7 @@ var unmarshalCluster = []struct {
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
}, },
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`),
err: nil}, err: nil},
{ {
about: "example with /status subresource", about: "example with /status subresource",
@ -184,7 +184,7 @@ var unmarshalCluster = []struct {
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1", "kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(), "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
}, },
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
err: nil}, err: nil},
{ {
about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName", about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
@ -327,7 +327,7 @@ var unmarshalCluster = []struct {
EndTime: mustParseTime("05:15"),
},
},
Clone: &CloneDescription{
ClusterName: "acid-batman",
},
ClusterName: "testcluster1",
@ -351,7 +351,7 @@ var unmarshalCluster = []struct {
Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
err: nil},
{
about: "example with clone",
@ -366,7 +366,7 @@ var unmarshalCluster = []struct {
},
Spec: PostgresSpec{
TeamID: "acid",
Clone: &CloneDescription{
ClusterName: "team-batman",
},
ClusterName: "testcluster1",
@ -405,7 +405,7 @@ var unmarshalCluster = []struct {
err: errors.New("unexpected end of JSON input")}, err: errors.New("unexpected end of JSON input")},
{ {
about: "expect error on JSON with field's value malformatted", about: "expect error on JSON with field's value malformatted",
in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
out: Postgresql{}, out: Postgresql{},
marshal: []byte{}, marshal: []byte{},
err: errors.New("invalid character 'q' looking for beginning of value"), err: errors.New("invalid character 'q' looking for beginning of value"),

View File

@ -27,6 +27,7 @@ SOFTWARE.
package v1
import (
config "github.com/zalando/postgres-operator/pkg/util/config"
corev1 "k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@ -146,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
*out = *in
if in.SpiloRunAsUser != nil {
in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
*out = new(int64)
**out = **in
}
if in.SpiloRunAsGroup != nil {
in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
*out = new(int64)
**out = **in
}
if in.SpiloFSGroup != nil {
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
*out = new(int64)
@ -168,6 +179,17 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
}
out.OAuthTokenSecretName = in.OAuthTokenSecretName
out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
if in.InfrastructureRolesDefs != nil {
in, out := &in.InfrastructureRolesDefs, &out.InfrastructureRolesDefs
*out = make([]*config.InfrastructureRole, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(config.InfrastructureRole)
**out = **in
}
}
}
if in.ClusterLabels != nil {
in, out := &in.ClusterLabels, &out.ClusterLabels
*out = make(map[string]string, len(*in))
@ -180,6 +202,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.InheritedAnnotations != nil {
in, out := &in.InheritedAnnotations, &out.InheritedAnnotations
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DownscalerAnnotations != nil {
in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations
*out = make([]string, len(*in))
@ -502,7 +529,7 @@ func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults
func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = *in
in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam)
in.Volume.DeepCopyInto(&out.Volume)
in.Patroni.DeepCopyInto(&out.Patroni)
out.Resources = in.Resources
if in.EnableConnectionPooler != nil {
@ -510,11 +537,26 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = new(bool)
**out = **in
}
if in.EnableReplicaConnectionPooler != nil {
in, out := &in.EnableReplicaConnectionPooler, &out.EnableReplicaConnectionPooler
*out = new(bool)
**out = **in
}
if in.ConnectionPooler != nil {
in, out := &in.ConnectionPooler, &out.ConnectionPooler
*out = new(ConnectionPooler)
(*in).DeepCopyInto(*out)
}
if in.SpiloRunAsUser != nil {
in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
*out = new(int64)
**out = **in
}
if in.SpiloRunAsGroup != nil {
in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
*out = new(int64)
**out = **in
}
if in.SpiloFSGroup != nil {
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
*out = new(int64)
@ -567,7 +609,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Clone != nil {
in, out := &in.Clone, &out.Clone
*out = new(CloneDescription)
(*in).DeepCopyInto(*out)
}
if in.Databases != nil {
in, out := &in.Databases, &out.Databases
*out = make(map[string]string, len(*in))
@ -582,6 +628,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = *val.DeepCopy()
}
}
if in.SchedulerName != nil {
in, out := &in.SchedulerName, &out.SchedulerName
*out = new(string)
**out = **in
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
*out = new(corev1.NodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]corev1.Toleration, len(*in))
@ -675,6 +731,127 @@ func (in *PostgresStatus) DeepCopy() *PostgresStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PostgresTeam) DeepCopyInto(out *PostgresTeam) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeam.
func (in *PostgresTeam) DeepCopy() *PostgresTeam {
if in == nil {
return nil
}
out := new(PostgresTeam)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PostgresTeam) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PostgresTeamList) DeepCopyInto(out *PostgresTeamList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PostgresTeam, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamList.
func (in *PostgresTeamList) DeepCopy() *PostgresTeamList {
if in == nil {
return nil
}
out := new(PostgresTeamList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PostgresTeamList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) {
*out = *in
if in.AdditionalSuperuserTeams != nil {
in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.AdditionalTeams != nil {
in, out := &in.AdditionalTeams, &out.AdditionalTeams
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.AdditionalMembers != nil {
in, out := &in.AdditionalMembers, &out.AdditionalMembers
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamSpec.
func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec {
if in == nil {
return nil
}
out := new(PostgresTeamSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) {
*out = *in
@ -993,6 +1170,16 @@ func (in UserFlags) DeepCopy() UserFlags {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in
if in.Iops != nil {
in, out := &in.Iops, &out.Iops
*out = new(int64)
**out = **in
}
if in.Throughput != nil {
in, out := &in.Throughput, &out.Throughput
*out = new(int64)
**out = **in
}
return
}
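These pointer copies matter for aliasing; a small sketch, assuming the generated DeepCopy companion of DeepCopyInto exists as usual for this package:

package main

import (
    "fmt"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
    iops := int64(3000)
    v := acidv1.Volume{Size: "10Gi", Iops: &iops}
    cp := v.DeepCopy()
    *cp.Iops = 6000
    // The copy owns its own *int64, so the original stays at 3000.
    fmt.Println(*v.Iops, *cp.Iops)
}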

View File

@ -5,7 +5,6 @@ package cluster
import (
"context"
"database/sql"
"encoding/json"
"fmt" "fmt"
"reflect" "reflect"
"regexp" "regexp"
@ -13,21 +12,12 @@ import (
"sync" "sync"
"time" "time"
"github.com/r3labs/diff"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
pgteams "github.com/zalando/postgres-operator/pkg/teams"
"github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/constants"
@ -35,7 +25,17 @@ import (
"github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/patroni"
"github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/teams"
"github.com/zalando/postgres-operator/pkg/util/users" "github.com/zalando/postgres-operator/pkg/util/users"
"github.com/zalando/postgres-operator/pkg/util/volumes"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
)
var (
@ -49,31 +49,17 @@ var (
type Config struct {
OpConfig config.Config
RestConfig *rest.Config
PgTeamMap pgteams.PostgresTeamMap
InfrastructureRoles map[string]spec.PgUser // inherited from the controller
PodServiceAccount *v1.ServiceAccount
PodServiceAccountRoleBinding *rbacv1.RoleBinding
}
// K8s objects that belong to a connection pooler
type ConnectionPoolerObjects struct {
Deployment *appsv1.Deployment
Service *v1.Service
// It could happen that a connection pooler was enabled, but the operator
// was not able to properly process a corresponding event or was restarted.
// In this case we would miss a missing/required situation and a lookup function
// will not be installed. To avoid synchronizing it all the time to prevent
// this, we can remember the result in memory at least until the next
// restart.
LookupFunction bool
}
type kubeResources struct {
Services map[PostgresRole]*v1.Service
Endpoints map[PostgresRole]*v1.Endpoints
Secrets map[types.UID]*v1.Secret
Statefulset *appsv1.StatefulSet
ConnectionPooler *ConnectionPoolerObjects
PodDisruptionBudget *policybeta1.PodDisruptionBudget
// Pods are treated separately
// PVCs are treated separately
@ -103,7 +89,9 @@ type Cluster struct {
currentProcess Process
processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex
specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex
ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects
EBSVolumes map[string]volumes.VolumeProperties
VolumeResizer volumes.VolumeResizer
}
type compareStatefulsetResult struct {
@ -125,6 +113,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
})
passwordEncryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"]
if !ok {
passwordEncryption = "md5"
}
cluster := &Cluster{
Config: cfg,
@ -136,7 +128,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
Secrets: make(map[types.UID]*v1.Secret),
Services: make(map[PostgresRole]*v1.Service),
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: passwordEncryption},
deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
podEventsQueue: podEventsQueue,
KubeClient: kubeClient,
@ -146,6 +138,13 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
cluster.patroni = patroni.New(cluster.logger)
cluster.eventRecorder = eventRecorder
cluster.EBSVolumes = make(map[string]volumes.VolumeProperties)
if cfg.OpConfig.StorageResizeMode != "pvc" || cfg.OpConfig.EnableEBSGp3Migration {
cluster.VolumeResizer = &volumes.EBSVolumeResizer{AWSRegion: cfg.OpConfig.AWSRegion}
}
return cluster
}
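The password_encryption lookup above follows a plain map-lookup-with-default idiom; a standalone sketch (the map stands in for pgSpec.Spec.PostgresqlParam.Parameters):

package main

import "fmt"

func main() {
    params := map[string]string{}
    passwordEncryption, ok := params["password_encryption"]
    if !ok {
        // md5 mirrors the default chosen in New() above; manifests can set
        // password_encryption to scram-sha-256 instead.
        passwordEncryption = "md5"
    }
    fmt.Println(passwordEncryption)
}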
@ -181,34 +180,6 @@ func (c *Cluster) GetReference() *v1.ObjectReference {
return ref
}
// SetStatus of Postgres cluster
// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above
func (c *Cluster) setStatus(status string) {
var pgStatus acidv1.PostgresStatus
pgStatus.PostgresClusterStatus = status
patch, err := json.Marshal(struct {
PgStatus interface{} `json:"status"`
}{&pgStatus})
if err != nil {
c.logger.Errorf("could not marshal status: %v", err)
}
// we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
// however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11)
// we should take advantage of it.
newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(
context.TODO(), c.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
if err != nil {
c.logger.Errorf("could not update status: %v", err)
// return as newspec is empty, see PR654
return
}
// update the spec, maintaining the new resourceVersion.
c.setSpec(newspec)
}
func (c *Cluster) isNewCluster() bool {
return c.Status.Creating()
}
@ -257,13 +228,13 @@ func (c *Cluster) Create() error {
defer func() {
if err == nil {
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
} else {
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
}
}()
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")
if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
@ -277,7 +248,7 @@ func (c *Cluster) Create() error {
}
if role == Master {
// replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
// since the corresponding master service does not define any selectors.
ep, err = c.createEndpoint(role)
if err != nil {
return fmt.Errorf("could not create %s endpoint: %v", role, err)
@ -371,19 +342,7 @@ func (c *Cluster) Create() error {
//
// Do not consider connection pooler as a strict requirement, and if
// something fails, report warning
c.createConnectionPooler(c.installLookupFunction)
if c.ConnectionPooler != nil {
c.logger.Warning("Connection pooler already exists in the cluster")
return nil
}
connectionPooler, err := c.createConnectionPooler(c.installLookupFunction)
if err != nil {
c.logger.Warningf("could not create connection pooler: %v", err)
return nil
}
c.logger.Infof("connection pooler %q has been successfully created",
util.NameFromMeta(connectionPooler.Deployment.ObjectMeta))
}
return nil
}
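Create() relies on a named return value plus a deferred closure to publish the final CRD status exactly once; a minimal standalone rendition of that pattern (names invented):

package main

import "fmt"

func create() (err error) {
    setStatus := func(s string) { fmt.Println("status:", s) }
    defer func() {
        // Runs after the function body; err carries the final outcome.
        if err == nil {
            setStatus("Running")
        } else {
            setStatus("CreateFailed")
        }
    }()
    setStatus("Creating")
    // ... create endpoints, services, secrets, the statefulset ...
    return nil
}

func main() { _ = create() }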
@ -396,11 +355,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
// TODO: improve me
if *c.Statefulset.Spec.Replicas != *statefulSet.Spec.Replicas {
match = false
reasons = append(reasons, "new statefulset's number of replicas does not match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
match = false
reasons = append(reasons, "new statefulset's annotations does not match the current one")
}
needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
@ -417,24 +376,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's serviceAccountName service account name does not match the current one")
}
if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds does not match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod affinity does not match the current one")
}
// Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) {
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's metadata labels does not match the current one")
}
if (c.Statefulset.Spec.Selector != nil) && (statefulSet.Spec.Selector != nil) {
if !reflect.DeepEqual(c.Statefulset.Spec.Selector.MatchLabels, statefulSet.Spec.Selector.MatchLabels) {
@ -445,7 +404,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
return &compareStatefulsetResult{}
}
needsReplace = true
reasons = append(reasons, "new statefulset's selector does not match the current one")
}
}
@ -453,7 +412,13 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod template metadata annotations does not match the current one")
}
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one")
}
if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) {
needsReplace = true
@ -464,26 +429,34 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
// Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta
if name != statefulSet.Spec.VolumeClaimTemplates[i].Name {
needsReplace = true
reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
continue
}
if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) {
needsReplace = true
reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name))
}
if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) {
name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
needsReplace = true
reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name))
}
}
// we assume any change in priority happens by rolling out a new priority class
// changing the priority value in an existing class is not supported
if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one")
}
// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
// until they are re-created for other reasons, for example node rotation
if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
needsReplace = true
reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one")
}
if needsRollUpdate || needsReplace {
@ -515,20 +488,22 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
}
checks := []containerCheck{
newCheck("new statefulset %s's %s (index %d) name does not match the current one",
func(a, b v1.Container) bool { return a.Name != b.Name }),
newCheck("new statefulset %s's %s (index %d) ports do not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }),
newCheck("new statefulset %s's %s (index %d) resources do not match the current ones",
func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }),
newCheck("new statefulset %s's %s (index %d) environment does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }),
newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
newCheck("new statefulset %s's %s (index %d) security context does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }),
}
if !c.OpConfig.EnableLazySpiloUpgrade {
checks = append(checks, newCheck("new statefulset %s's %s (index %d) image does not match the current one",
func(a, b v1.Container) bool { return a.Image != b.Image }))
}
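The containerCheck table above pairs a reason template with a predicate; a self-contained rendition of the same pattern with invented types:

package main

import "fmt"

type container struct{ Name, Image string }

type check struct {
    reason  string
    differs func(a, b container) bool
}

func main() {
    checks := []check{
        {"name does not match", func(a, b container) bool { return a.Name != b.Name }},
        {"image does not match", func(a, b container) bool { return a.Image != b.Image }},
    }
    a := container{"postgres", "spilo:1.6-p5"}
    b := container{"postgres", "spilo:2.0-p2"}
    for _, c := range checks {
        if c.differs(a, b) {
            fmt.Println(c.reason)
        }
    }
}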
@ -593,7 +568,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err) return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
} }
if isSmaller { if isSmaller {
c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
spec.Resources.ResourceLimits.CPU = minCPULimit spec.Resources.ResourceLimits.CPU = minCPULimit
} }
@ -606,7 +581,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err) return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
} }
if isSmaller { if isSmaller {
c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
spec.Resources.ResourceLimits.Memory = minMemoryLimit spec.Resources.ResourceLimits.Memory = minMemoryLimit
} }
@ -621,34 +596,40 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
// for a cluster that had no such job before. In this case a missing job is not an error.
func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed := false
syncStatetfulSet := false
c.mu.Lock()
defer c.mu.Unlock()
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)
c.setSpec(newSpec)
defer func() {
if updateFailed {
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
} else {
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
}
}()
logNiceDiff(c.logger, oldSpec, newSpec)
if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion {
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect", c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect",
oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
//we need that hack to generate statefulset with the old version // we need that hack to generate statefulset with the old version
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
} else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion {
c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync",
oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
syncStatetfulSet = true
}
// Service
if !reflect.DeepEqual(c.generateService(Master, &oldSpec.Spec), c.generateService(Master, &newSpec.Spec)) ||
!reflect.DeepEqual(c.generateService(Replica, &oldSpec.Spec), c.generateService(Replica, &newSpec.Spec)) {
c.logger.Debugf("syncing services")
if err := c.syncServices(); err != nil {
c.logger.Errorf("could not sync services: %v", err)
updateFailed = true
@ -659,7 +640,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// initUsers. Check if it needs to be called.
sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) &&
reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases)
needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) ||
needReplicaConnectionPoolerWorker(&newSpec.Spec)
if !sameUsers || needConnectionPooler {
c.logger.Debugf("syncing secrets")
if err := c.initUsers(); err != nil {
@ -678,13 +660,21 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Volume
if oldSpec.Spec.Size != newSpec.Spec.Size {
c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
if c.OpConfig.StorageResizeMode == "pvc" {
if err := c.syncVolumeClaims(); err != nil {
c.logger.Errorf("could not sync persistent volume claims: %v", err)
updateFailed = true
}
} else if c.OpConfig.StorageResizeMode == "ebs" {
if err := c.syncVolumes(); err != nil {
c.logger.Errorf("could not sync persistent volumes: %v", err)
updateFailed = true
}
} else {
c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
}
}
// Statefulset
@ -711,8 +701,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed = true
return
}
if syncStatetfulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
c.logger.Debugf("syncing statefulsets")
syncStatetfulSet = false
// TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet
if err := c.syncStatefulSet(); err != nil {
c.logger.Errorf("could not sync statefulsets: %v", err)
@ -791,9 +782,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}
// Sync connection pooler. Before actually doing sync reset lookup
// installation flag, since manifest updates could add another db which we
// need to process. In the future we may want to do this more carefully and
// check which databases we need to process, but even repeating the whole
// installation process should be good enough.
if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
c.logger.Errorf("could not sync connection pooler: %v", err)
updateFailed = true
}
@ -801,6 +796,20 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
return nil
}
func syncResources(a, b *v1.ResourceRequirements) bool {
for _, res := range []v1.ResourceName{
v1.ResourceCPU,
v1.ResourceMemory,
} {
if !a.Limits[res].Equal(b.Limits[res]) ||
!a.Requests[res].Equal(b.Requests[res]) {
return true
}
}
return false
}
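// Editor's note: an illustrative sketch, not part of the source diff. The
// helper above reports drift, returning true when CPU or memory requests or
// limits differ and the deployment therefore needs a sync:
//
//	current := v1.ResourceRequirements{
//		Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
//	}
//	desired := v1.ResourceRequirements{
//		Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
//	}
//	syncResources(&current, &desired) // true: CPU limits differ
//
// (resource here is assumed to be k8s.io/apimachinery/pkg/api/resource; the
// comparison relies on Quantity.Equal, as used in the function above.)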
// Delete deletes the cluster and cleans up all objects associated with it (including statefulsets).
// The deletion order here is somewhat significant, because Patroni, when running with the Kubernetes
// DCS, reuses the master's endpoint to store the leader related metadata. If we remove the endpoint
@ -821,14 +830,8 @@ func (c *Cluster) Delete() {
c.logger.Warningf("could not delete statefulset: %v", err) c.logger.Warningf("could not delete statefulset: %v", err)
} }
for _, obj := range c.Secrets { if err := c.deleteSecrets(); err != nil {
if doDelete, user := c.shouldDeleteSecret(obj); !doDelete { c.logger.Warningf("could not delete secrets: %v", err)
c.logger.Warningf("not removing secret %q for the system user %q", obj.GetName(), user)
continue
}
if err := c.deleteSecret(obj); err != nil {
c.logger.Warningf("could not delete secret: %v", err)
}
} }
if err := c.deletePodDisruptionBudget(); err != nil {
@ -855,9 +858,12 @@ func (c *Cluster) Delete() {
// Delete connection pooler objects anyway, even if they are not mentioned in
// the manifest, just to not keep orphaned components around in case something
// went wrong
for _, role := range [2]PostgresRole{Master, Replica} {
if err := c.deleteConnectionPooler(role); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}
// NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status).
@ -927,7 +933,7 @@ func (c *Cluster) initSystemUsers() {
// Connection pooler user is an exception: if requested, it's going to be
// created by the operator as a normal pgUser
if needConnectionPooler(&c.Spec) {
// initialize empty connection pooler if not done yet
if c.Spec.ConnectionPooler == nil {
c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
@ -985,32 +991,42 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
}
for preparedDbName, preparedDB := range c.Spec.PreparedDatabases {
// get list of prepared schemas to set in search_path
preparedSchemas := preparedDB.PreparedSchemas
if len(preparedDB.PreparedSchemas) == 0 {
preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
}
var searchPath strings.Builder
searchPath.WriteString(constants.DefaultSearchPath)
for preparedSchemaName := range preparedSchemas {
searchPath.WriteString(", " + preparedSchemaName)
}
// default roles per database
if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
}
if preparedDB.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
}
}
// default roles per database schema
for preparedSchemaName, preparedSchema := range preparedSchemas {
if preparedSchema.DefaultRoles == nil || *preparedSchema.DefaultRoles {
if err := c.initDefaultRoles(defaultRoles,
preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
}
if preparedSchema.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers,
preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
}
}
@ -1020,7 +1036,7 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
return nil
}
func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error {
for defaultRole, inherits := range defaultRoles {
@ -1044,12 +1060,13 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
}
newRole := spec.PgUser{
Origin: spec.RoleOriginBootstrap,
Name: roleName,
Password: util.RandomPassword(constants.PasswordLength),
Flags: flags,
MemberOf: memberOf,
Parameters: map[string]string{"search_path": searchPath},
AdminRole: adminRole,
}
if currentRole, present := c.pgUsers[roleName]; present {
c.pgUsers[roleName] = c.resolveNameConflict(&currentRole, &newRole)
@ -1107,7 +1124,7 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e
if c.shouldAvoidProtectedOrSystemRole(username, "API role") {
continue
}
if (c.OpConfig.EnableTeamSuperuser && teamID == c.Spec.TeamID) || isPostgresSuperuserTeam {
flags = append(flags, constants.RoleFlagSuperuser)
} else {
if c.OpConfig.TeamAdminRole != "" {
@ -1136,17 +1153,38 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e
func (c *Cluster) initHumanUsers() error {
var clusterIsOwnedBySuperuserTeam bool
superuserTeams := []string{}
if c.OpConfig.EnablePostgresTeamCRDSuperusers {
superuserTeams = c.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true)
}
for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams {
if !(util.SliceContains(superuserTeams, postgresSuperuserTeam)) {
superuserTeams = append(superuserTeams, postgresSuperuserTeam)
}
}
for _, superuserTeam := range superuserTeams {
err := c.initTeamMembers(superuserTeam, true)
if err != nil {
return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
}
if superuserTeam == c.Spec.TeamID {
clusterIsOwnedBySuperuserTeam = true
}
}
additionalTeams := c.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true)
for _, additionalTeam := range additionalTeams {
if !(util.SliceContains(superuserTeams, additionalTeam)) {
err := c.initTeamMembers(additionalTeam, false)
if err != nil {
return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
}
}
}
if clusterIsOwnedBySuperuserTeam {
c.logger.Infof("Team %q owning the cluster is also a team of superusers. Created superuser roles for its members instead of admin roles.", c.Spec.TeamID)
return nil
@ -1154,7 +1192,7 @@ func (c *Cluster) initHumanUsers() error {
err := c.initTeamMembers(c.Spec.TeamID, false)
if err != nil {
return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
}
return nil
@ -1300,11 +1338,6 @@ func (c *Cluster) Unlock() {
c.mu.Unlock()
}
func (c *Cluster) shouldDeleteSecret(secret *v1.Secret) (delete bool, userName string) {
secretUser := string(secret.Data["username"])
return (secretUser != c.OpConfig.ReplicationUsername && secretUser != c.OpConfig.SuperUsername), secretUser
}
type simpleActionWithResult func() error
type clusterObjectGet func(name string) (spec.NamespacedName, error)
@ -1345,7 +1378,7 @@ func (c *Cluster) deleteClusterObject(
objType, namespacedName)
if err = del(name); err != nil {
return fmt.Errorf("could not delete Patroni cluster object %q with name %q: %v",
objType, namespacedName, err)
}
@ -1395,119 +1428,3 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error {
return c.deleteClusterObject(get, deleteConfigMapFn, "configmap")
}
// Test if two connection pooler configuration needs to be synced. For simplicity
// compare not the actual K8S objects, but the configuration itself and request
// sync if there is any difference.
func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
reasons = []string{}
sync = false
changelog, err := diff.Diff(oldSpec, newSpec)
if err != nil {
c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
return false, reasons
}
if len(changelog) > 0 {
sync = true
}
for _, change := range changelog {
msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'",
change.Type, change.Path, change.From, change.To)
reasons = append(reasons, msg)
}
return sync, reasons
}
func syncResources(a, b *v1.ResourceRequirements) bool {
for _, res := range []v1.ResourceName{
v1.ResourceCPU,
v1.ResourceMemory,
} {
if !a.Limits[res].Equal(b.Limits[res]) ||
!a.Requests[res].Equal(b.Requests[res]) {
return true
}
}
return false
}
// Check if we need to synchronize connection pooler deployment due to new
// defaults, that are different from what we see in the DeploymentSpec
func (c *Cluster) needSyncConnectionPoolerDefaults(
spec *acidv1.ConnectionPooler,
deployment *appsv1.Deployment) (sync bool, reasons []string) {
reasons = []string{}
sync = false
config := c.OpConfig.ConnectionPooler
podTemplate := deployment.Spec.Template
poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer]
if spec == nil {
spec = &acidv1.ConnectionPooler{}
}
if spec.NumberOfInstances == nil &&
*deployment.Spec.Replicas != *config.NumberOfInstances {
sync = true
msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)",
*deployment.Spec.Replicas, *config.NumberOfInstances)
reasons = append(reasons, msg)
}
if spec.DockerImage == "" &&
poolerContainer.Image != config.Image {
sync = true
msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
poolerContainer.Image, config.Image)
reasons = append(reasons, msg)
}
expectedResources, err := generateResourceRequirements(spec.Resources,
c.makeDefaultConnectionPoolerResources())
// An error to generate expected resources means something is not quite
// right, but for the purpose of robustness do not panic here, just report
// and ignore resources comparison (in the worst case there will be no
// updates for new resource values).
if err == nil && syncResources(&poolerContainer.Resources, expectedResources) {
sync = true
msg := fmt.Sprintf("Resources are different (having %+v, required %+v)",
poolerContainer.Resources, expectedResources)
reasons = append(reasons, msg)
}
if err != nil {
c.logger.Warningf("Cannot generate expected resources, %v", err)
}
for _, env := range poolerContainer.Env {
if spec.User == "" && env.Name == "PGUSER" {
ref := env.ValueFrom.SecretKeyRef.LocalObjectReference
if ref.Name != c.credentialSecretName(config.User) {
sync = true
msg := fmt.Sprintf("pooler user is different (having %s, required %s)",
ref.Name, config.User)
reasons = append(reasons, msg)
}
}
if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema {
sync = true
msg := fmt.Sprintf("pooler schema is different (having %s, required %s)",
env.Value, config.Schema)
reasons = append(reasons, msg)
}
}
return sync, reasons
}

View File

@ -12,7 +12,6 @@ import (
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/teams"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
) )
@ -334,36 +333,6 @@ func TestInitHumanUsersWithSuperuserTeams(t *testing.T) {
}
}
func TestShouldDeleteSecret(t *testing.T) {
testName := "TestShouldDeleteSecret"
tests := []struct {
secret *v1.Secret
outcome bool
}{
{
secret: &v1.Secret{Data: map[string][]byte{"username": []byte("foobar")}},
outcome: true,
},
{
secret: &v1.Secret{Data: map[string][]byte{"username": []byte(superUserName)}},
outcome: false,
},
{
secret: &v1.Secret{Data: map[string][]byte{"username": []byte(replicationUserName)}},
outcome: false,
},
}
for _, tt := range tests {
if outcome, username := cl.shouldDeleteSecret(tt.secret); outcome != tt.outcome {
t.Errorf("%s expects the check for deletion of the username %q secret to return %t, got %t",
testName, username, tt.outcome, outcome)
}
}
}
func TestPodAnnotations(t *testing.T) {
testName := "TestPodAnnotations"
tests := []struct {

View File

@ -0,0 +1,905 @@
package cluster
import (
"context"
"fmt"
"strings"
"github.com/r3labs/diff"
"github.com/sirupsen/logrus"
acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
)
// ConnectionPoolerObjects holds the K8s objects that belong to a connection pooler
type ConnectionPoolerObjects struct {
Deployment *appsv1.Deployment
Service *v1.Service
Name string
ClusterName string
Namespace string
Role PostgresRole
// It could happen that a connection pooler was enabled, but the operator
// was not able to properly process the corresponding event or was restarted.
// In that case we would miss the fact that a lookup function is required and
// it would not be installed. To avoid synchronizing it all the time, we
// remember the result in memory, at least until the next restart.
LookupFunction bool
// Be careful when referencing cluster.spec: this object pointer changes
// during the runtime and lifetime of the cluster.
}
func (c *Cluster) connectionPoolerName(role PostgresRole) string {
name := c.Name + "-pooler"
if role == Replica {
name = name + "-repl"
}
return name
}
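// Editor's note: an illustrative helper, not part of the source diff. For a
// cluster named "acid-test" the naming scheme above yields "acid-test-pooler"
// for the master pooler and "acid-test-pooler-repl" for the replica one.
func exampleConnectionPoolerNames(c *Cluster) []string {
	return []string{
		c.connectionPoolerName(Master),  // e.g. "acid-test-pooler"
		c.connectionPoolerName(Replica), // e.g. "acid-test-pooler-repl"
	}
}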
// needConnectionPooler reports whether a connection pooler is enabled for either role
func needConnectionPooler(spec *acidv1.PostgresSpec) bool {
return needMasterConnectionPoolerWorker(spec) ||
needReplicaConnectionPoolerWorker(spec)
}
func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool {
return needMasterConnectionPoolerWorker(spec)
}
func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) ||
(spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
}
func needReplicaConnectionPooler(spec *acidv1.PostgresSpec) bool {
return needReplicaConnectionPoolerWorker(spec)
}
func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
return spec.EnableReplicaConnectionPooler != nil &&
*spec.EnableReplicaConnectionPooler
}
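// Editor's note: an illustrative example, not part of the source diff. It
// spells out the enablement rules above: the master pooler is on when the
// flag is true, or when a connectionPooler section exists and the flag is
// unset; the replica pooler follows only its own flag.
func examplePoolerEnablement() {
	yes := true
	spec := acidv1.PostgresSpec{ConnectionPooler: &acidv1.ConnectionPooler{}}
	_ = needMasterConnectionPoolerWorker(&spec)  // true: section present, flag nil
	_ = needReplicaConnectionPoolerWorker(&spec) // false: replica flag not set
	spec.EnableReplicaConnectionPooler = &yes
	_ = needReplicaConnectionPoolerWorker(&spec) // true now
}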
// Return the connection pooler labels selector, which should, on the one hand,
// inherit most of the labels from the cluster itself, but, on the other hand,
// have e.g. a different `application` label, so that the recreatePod operation
// will not interfere with it (it lists all the pods via labels, and if there
// were no difference it would also recreate the pooler pods).
func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector {
poolerLabels := c.labelsSet(addExtraLabels)
// TODO should be config values
poolerLabels["application"] = "db-connection-pooler"
poolerLabels["connection-pooler"] = c.connectionPoolerName(role)
if addExtraLabels {
extraLabels := map[string]string{}
extraLabels[c.OpConfig.PodRoleLabel] = string(role)
poolerLabels = labels.Merge(poolerLabels, extraLabels)
}
return &metav1.LabelSelector{
MatchLabels: poolerLabels,
MatchExpressions: nil,
}
}
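// Editor's note: an illustrative helper, not part of the source diff. For a
// master pooler the selector built above matches labels such as
// application=db-connection-pooler and connection-pooler=<cluster>-pooler,
// plus the configured pod role label when addExtraLabels is true, so pooler
// pods never fall under the selector used for the database pods.
func examplePoolerSelector(c *Cluster) map[string]string {
	return c.connectionPoolerLabels(Master, true).MatchLabels
}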
// Prepare the database for the connection pooler to be used, i.e. install the
// lookup function (do it first, because it should be fast, and if it doesn't
// succeed it makes no sense to create more K8s objects). At this moment we
// assume that the necessary connection pooler user exists.
//
// After that, create all the objects for the connection pooler, namely a
// deployment with the chosen pooler and a service to expose it. The pooler
// name is kept in the ConnectionPoolerObjects struct so that it stays
// immutable over the lifetime of the cluster.
func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncReason, error) {
var reason SyncReason
c.setProcessName("creating connection pooler")
// this is essentially a sync with nil as the oldSpec
if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, LookupFunction); err != nil {
return reason, err
}
return reason, nil
}
//
// Generate pool size related environment variables.
//
// MAX_DB_CONN would specify the global maximum for connections to a target
// database.
//
// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough.
//
// DEFAULT_SIZE is the pool size per db/user pair (having in mind the use case
// when most of the queries coming through the connection pooler are from the
// same user to the same db). If we want to spin up more connection pooler
// instances, this is taken into account so that the same total number of
// connections is maintained.
//
// MIN_SIZE is the pool's minimal size, to prevent a situation when a sudden
// workload has to wait for new connections to be spun up.
//
// RESERVE_SIZE is how many additional connections to allow for a pooler.
func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
spec := &c.Spec
effectiveMode := util.Coalesce(
spec.ConnectionPooler.Mode,
c.OpConfig.ConnectionPooler.Mode)
numberOfInstances := spec.ConnectionPooler.NumberOfInstances
if numberOfInstances == nil {
numberOfInstances = util.CoalesceInt32(
c.OpConfig.ConnectionPooler.NumberOfInstances,
k8sutil.Int32ToPointer(1))
}
effectiveMaxDBConn := util.CoalesceInt32(
spec.ConnectionPooler.MaxDBConnections,
c.OpConfig.ConnectionPooler.MaxDBConnections)
if effectiveMaxDBConn == nil {
effectiveMaxDBConn = k8sutil.Int32ToPointer(
constants.ConnectionPoolerMaxDBConnections)
}
maxDBConn := *effectiveMaxDBConn / *numberOfInstances
defaultSize := maxDBConn / 2
minSize := defaultSize / 2
reserveSize := minSize
return []v1.EnvVar{
{
Name: "CONNECTION_POOLER_PORT",
Value: fmt.Sprint(pgPort),
},
{
Name: "CONNECTION_POOLER_MODE",
Value: effectiveMode,
},
{
Name: "CONNECTION_POOLER_DEFAULT_SIZE",
Value: fmt.Sprint(defaultSize),
},
{
Name: "CONNECTION_POOLER_MIN_SIZE",
Value: fmt.Sprint(minSize),
},
{
Name: "CONNECTION_POOLER_RESERVE_SIZE",
Value: fmt.Sprint(reserveSize),
},
{
Name: "CONNECTION_POOLER_MAX_CLIENT_CONN",
Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections),
},
{
Name: "CONNECTION_POOLER_MAX_DB_CONN",
Value: fmt.Sprint(maxDBConn),
},
}
}
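// Editor's note: a worked example of the sizing arithmetic above, assuming
// the default cap of constants.ConnectionPoolerMaxDBConnections (60 in the
// upstream defaults, an assumption here) and two pooler instances:
//
//	maxDBConn   = 60 / 2 = 30 // per instance, keeping the global cap at 60
//	defaultSize = 30 / 2 = 15 // pool size per db/user pair
//	minSize     = 15 / 2 = 7  // integer division
//	reserveSize = 7           // extra connections on top of the minimum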
func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
*v1.PodTemplateSpec, error) {
spec := &c.Spec
gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
resources, err := generateResourceRequirements(
spec.ConnectionPooler.Resources,
makeDefaultConnectionPoolerResources(&c.OpConfig))
effectiveDockerImage := util.Coalesce(
spec.ConnectionPooler.DockerImage,
c.OpConfig.ConnectionPooler.Image)
effectiveSchema := util.Coalesce(
spec.ConnectionPooler.Schema,
c.OpConfig.ConnectionPooler.Schema)
if err != nil {
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
}
secretSelector := func(key string) *v1.SecretKeySelector {
effectiveUser := util.Coalesce(
spec.ConnectionPooler.User,
c.OpConfig.ConnectionPooler.User)
return &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: c.credentialSecretName(effectiveUser),
},
Key: key,
}
}
envVars := []v1.EnvVar{
{
Name: "PGHOST",
Value: c.serviceAddress(role),
},
{
Name: "PGPORT",
Value: c.servicePort(role),
},
{
Name: "PGUSER",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: secretSelector("username"),
},
},
// the convention is to use the same schema name as
// connection pooler username
{
Name: "PGSCHEMA",
Value: effectiveSchema,
},
{
Name: "PGPASSWORD",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: secretSelector("password"),
},
},
}
envVars = append(envVars, c.getConnectionPoolerEnvVars()...)
poolerContainer := v1.Container{
Name: connectionPoolerContainer,
Image: effectiveDockerImage,
ImagePullPolicy: v1.PullIfNotPresent,
Resources: *resources,
Ports: []v1.ContainerPort{
{
ContainerPort: pgPort,
Protocol: v1.ProtocolTCP,
},
},
Env: envVars,
ReadinessProbe: &v1.Probe{
Handler: v1.Handler{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.IntOrString{IntVal: pgPort},
},
},
},
}
podTemplate := &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.connectionPoolerLabels(role, true).MatchLabels,
Namespace: c.Namespace,
Annotations: c.annotationsSet(c.generatePodAnnotations(spec)),
},
Spec: v1.PodSpec{
ServiceAccountName: c.OpConfig.PodServiceAccountName,
TerminationGracePeriodSeconds: &gracePeriod,
Containers: []v1.Container{poolerContainer},
// TODO: add tolerations to scheduler pooler on the same node
// as database
//Tolerations: *tolerationsSpec,
},
}
return podTemplate, nil
}
func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *ConnectionPoolerObjects) (
*appsv1.Deployment, error) {
spec := &c.Spec
// there are two ways to enable connection pooler, either to specify a
// connectionPooler section or enableConnectionPooler. In the second case
// spec.connectionPooler will be nil, so to make it easier to calculate
// default values, initialize it to an empty structure. It could be done
// anywhere, but here is the earliest common entry point between sync and
// create code, so init here.
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role)
numberOfInstances := spec.ConnectionPooler.NumberOfInstances
if numberOfInstances == nil {
numberOfInstances = util.CoalesceInt32(
c.OpConfig.ConnectionPooler.NumberOfInstances,
k8sutil.Int32ToPointer(1))
}
if *numberOfInstances < constants.ConnectionPoolerMinInstances {
msg := "Adjusted number of connection pooler instances from %d to %d"
c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances)
*numberOfInstances = constants.ConnectionPoolerMinInstances
}
if err != nil {
return nil, err
}
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: connectionPooler.Name,
Namespace: connectionPooler.Namespace,
Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
// make the StatefulSet object its owner to represent the dependency.
// The StatefulSet itself is deleted with the "Orphaned" propagation
// policy, which means its deletion will not clean up this deployment,
// but there is a hope that the object will be garbage collected if
// something went wrong and the operator didn't delete it.
OwnerReferences: c.ownerReferences(),
},
Spec: appsv1.DeploymentSpec{
Replicas: numberOfInstances,
Selector: c.connectionPoolerLabels(connectionPooler.Role, false),
Template: *podTemplate,
},
}
return deployment, nil
}
func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {
spec := &c.Spec
// there are two ways to enable connection pooler, either to specify a
// connectionPooler section or enableConnectionPooler. In the second case
// spec.connectionPooler will be nil, so to make it easier to calculate
// default values, initialize it to an empty structure. It could be done
// anywhere, but here is the earliest common entry point between sync and
// create code, so init here.
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
serviceSpec := v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Name: connectionPooler.Name,
Port: pgPort,
TargetPort: intstr.IntOrString{StrVal: c.servicePort(connectionPooler.Role)},
},
},
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{
"connection-pooler": c.connectionPoolerName(connectionPooler.Role),
},
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: connectionPooler.Name,
Namespace: connectionPooler.Namespace,
Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)),
// make the StatefulSet object its owner to represent the dependency.
// The StatefulSet itself is deleted with the "Orphaned" propagation
// policy, which means its deletion will not clean up this service,
// but there is a hope that the object will be garbage collected if
// something went wrong and the operator didn't delete it.
OwnerReferences: c.ownerReferences(),
},
Spec: serviceSpec,
}
return service
}
// deleteConnectionPooler deletes the connection pooler objects for the given role
func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
c.logger.Infof("deleting connection pooler spilo-role=%s", role)
// Lack of connection pooler objects is not a fatal error, just log it if
// it was present before in the manifest
if c.ConnectionPooler[role] == nil || role == "" {
c.logger.Debugf("no connection pooler to delete")
return nil
}
// Clean up the deployment object. If the deployment resource we've
// remembered is somehow empty, try to delete based on what we would generate.
deployment := c.ConnectionPooler[role].Deployment
policy := metav1.DeletePropagationForeground
options := metav1.DeleteOptions{PropagationPolicy: &policy}
if deployment != nil {
// set delete propagation policy to foreground, so that replica set will be
// also deleted.
err = c.KubeClient.
Deployments(c.Namespace).
Delete(context.TODO(), deployment.Name, options)
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("connection pooler deployment was already deleted")
} else if err != nil {
return fmt.Errorf("could not delete connection pooler deployment: %v", err)
}
c.logger.Infof("connection pooler deployment %s has been deleted for role %s", deployment.Name, role)
}
// Repeat the same for the service object
service := c.ConnectionPooler[role].Service
if service == nil {
c.logger.Debugf("no connection pooler service object to delete")
} else {
err = c.KubeClient.
Services(c.Namespace).
Delete(context.TODO(), service.Name, options)
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("connection pooler service was already deleted")
} else if err != nil {
return fmt.Errorf("could not delete connection pooler service: %v", err)
}
c.logger.Infof("connection pooler service %s has been deleted for role %s", service.Name, role)
}
c.ConnectionPooler[role].Deployment = nil
c.ConnectionPooler[role].Service = nil
return nil
}
// deleteConnectionPoolerSecret deletes the secret used by the connection pooler
func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
secret, err := c.KubeClient.
Secrets(c.Namespace).
Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err)
} else {
if err = c.deleteSecret(secret.UID, *secret); err != nil {
return fmt.Errorf("could not delete pooler secret: %v", err)
}
}
return nil
}
// Perform the actual patching of a connection pooler deployment, assuming that
// all the checks were already done before.
func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
if newDeployment == nil {
return nil, fmt.Errorf("there is no connection pooler in the cluster")
}
patchData, err := specPatch(newDeployment.Spec)
if err != nil {
return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err)
}
// An update probably requires RetryOnConflict, but since only one operator
// worker at a time will try to update it, the chances of conflicts are
// minimal.
deployment, err := KubeClient.
Deployments(newDeployment.Namespace).Patch(
context.TODO(),
newDeployment.Name,
types.MergePatchType,
patchData,
metav1.PatchOptions{},
"")
if err != nil {
return nil, fmt.Errorf("could not patch connection pooler deployment: %v", err)
}
return deployment, nil
}
// updateConnectionPoolerAnnotations updates the annotations of the connection pooler deployment
func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) {
patchData, err := metaAnnotationsPatch(annotations)
if err != nil {
return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err)
}
result, err := KubeClient.Deployments(deployment.Namespace).Patch(
context.TODO(),
deployment.Name,
types.MergePatchType,
[]byte(patchData),
metav1.PatchOptions{},
"")
if err != nil {
return nil, fmt.Errorf("could not patch connection pooler annotations %q: %v", patchData, err)
}
return result, nil
}
// Test if two connection pooler configurations need to be synced. For
// simplicity, compare not the actual K8s objects but the configuration itself,
// and request a sync if there is any difference.
func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
reasons = []string{}
sync = false
changelog, err := diff.Diff(oldSpec, newSpec)
if err != nil {
//c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
return false, reasons
}
if len(changelog) > 0 {
sync = true
}
for _, change := range changelog {
msg := fmt.Sprintf("%s %+v from '%+v' to '%+v'",
change.Type, change.Path, change.From, change.To)
reasons = append(reasons, msg)
}
return sync, reasons
}
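// Editor's note: an illustrative sketch, not part of the source diff, of what
// the changelog-based comparison above reports when numberOfInstances changes:
//
//	one, two := int32(1), int32(2)
//	sync, reasons := needSyncConnectionPoolerSpecs(
//		&acidv1.ConnectionPooler{NumberOfInstances: &one},
//		&acidv1.ConnectionPooler{NumberOfInstances: &two})
//	// sync == true; reasons holds one entry along the lines of
//	// "update [NumberOfInstances] from '1' to '2'"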
// Check if we need to synchronize the connection pooler deployment due to new
// defaults that are different from what we see in the DeploymentSpec
func needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.ConnectionPooler, deployment *appsv1.Deployment) (sync bool, reasons []string) {
reasons = []string{}
sync = false
config := Config.OpConfig.ConnectionPooler
podTemplate := deployment.Spec.Template
poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer]
if spec == nil {
spec = &acidv1.ConnectionPooler{}
}
if spec.NumberOfInstances == nil &&
*deployment.Spec.Replicas != *config.NumberOfInstances {
sync = true
msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)",
*deployment.Spec.Replicas, *config.NumberOfInstances)
reasons = append(reasons, msg)
}
if spec.DockerImage == "" &&
poolerContainer.Image != config.Image {
sync = true
msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
poolerContainer.Image, config.Image)
reasons = append(reasons, msg)
}
expectedResources, err := generateResourceRequirements(spec.Resources,
makeDefaultConnectionPoolerResources(&Config.OpConfig))
// An error while generating the expected resources means something is not
// quite right, but for the purpose of robustness do not panic here; just
// report it and skip the resources comparison (in the worst case there will
// be no updates for new resource values).
if err == nil && syncResources(&poolerContainer.Resources, expectedResources) {
sync = true
msg := fmt.Sprintf("Resources are different (having %+v, required %+v)",
poolerContainer.Resources, expectedResources)
reasons = append(reasons, msg)
}
if err != nil {
return false, reasons
}
for _, env := range poolerContainer.Env {
if spec.User == "" && env.Name == "PGUSER" {
ref := env.ValueFrom.SecretKeyRef.LocalObjectReference
secretName := Config.OpConfig.SecretNameTemplate.Format(
"username", strings.Replace(config.User, "_", "-", -1),
"cluster", deployment.ClusterName,
"tprkind", acidv1.PostgresCRDResourceKind,
"tprgroup", acidzalando.GroupName)
if ref.Name != secretName {
sync = true
msg := fmt.Sprintf("pooler user is different (having %s, required %s)",
ref.Name, config.User)
reasons = append(reasons, msg)
}
}
if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema {
sync = true
msg := fmt.Sprintf("pooler schema is different (having %s, required %s)",
env.Value, config.Schema)
reasons = append(reasons, msg)
}
}
return sync, reasons
}
// Generate default resource section for connection pooler deployment, to be
// used if nothing custom is specified in the manifest
func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources {
defaultRequests := acidv1.ResourceDescription{
CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
}
defaultLimits := acidv1.ResourceDescription{
CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
}
return acidv1.Resources{
ResourceRequests: defaultRequests,
ResourceLimits: defaultLimits,
}
}
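// Editor's note: illustrative only, not part of the source diff. With the
// defaults used by the tests below (100m CPU and 100Mi memory for both
// requests and limits), the function returns:
//
//	acidv1.Resources{
//		ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
//		ResourceLimits:   acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
//	}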
func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) {
var v []string
var input []*bool
if oldSpec == nil {
input = []*bool{nil, nil, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler}
} else {
input = []*bool{oldSpec.Spec.EnableConnectionPooler, oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler}
}
for _, b := range input {
if b == nil {
v = append(v, "nil")
} else {
v = append(v, fmt.Sprintf("%v", *b))
}
}
log.Debugf("syncing connection pooler from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3])
}
func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {
logPoolerEssentials(c.logger, oldSpec, newSpec)
var reason SyncReason
var err error
var newNeedConnectionPooler, oldNeedConnectionPooler bool
oldNeedConnectionPooler = false
// Check and perform the sync requirements for each of the roles.
for _, role := range [2]PostgresRole{Master, Replica} {
if role == Master {
newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec)
if oldSpec != nil {
oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec)
}
} else {
newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec)
if oldSpec != nil {
oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec)
}
}
// if the call is via createConnectionPooler, then it is required to initialize
// the structure
if c.ConnectionPooler == nil {
c.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
}
if c.ConnectionPooler[role] == nil {
c.ConnectionPooler[role] = &ConnectionPoolerObjects{
Deployment: nil,
Service: nil,
Name: c.connectionPoolerName(role),
ClusterName: c.ClusterName,
Namespace: c.Namespace,
LookupFunction: false,
Role: role,
}
}
if newNeedConnectionPooler {
// Try to sync in any case. If we didn't need a connection pooler before,
// it means we want to create it. If it was already present, still sync,
// since it could happen that there is no difference in specs and all
// the resources are remembered, but the deployment was manually deleted
// in between.
// In this case also do not forget to install the lookup function, as when
// creating the cluster.
if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction {
newConnectionPooler := newSpec.Spec.ConnectionPooler
specSchema := ""
specUser := ""
if newConnectionPooler != nil {
specSchema = newConnectionPooler.Schema
specUser = newConnectionPooler.User
}
schema := util.Coalesce(
specSchema,
c.OpConfig.ConnectionPooler.Schema)
user := util.Coalesce(
specUser,
c.OpConfig.ConnectionPooler.User)
if err = LookupFunction(schema, user, role); err != nil {
return NoSync, err
}
}
if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil {
c.logger.Errorf("could not sync connection pooler: %v", err)
return reason, err
}
} else {
// delete and cleanup resources if they are already detected
if c.ConnectionPooler[role] != nil &&
(c.ConnectionPooler[role].Deployment != nil ||
c.ConnectionPooler[role].Service != nil) {
if err = c.deleteConnectionPooler(role); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}
}
if !needMasterConnectionPoolerWorker(&newSpec.Spec) &&
!needReplicaConnectionPoolerWorker(&newSpec.Spec) {
if err = c.deleteConnectionPoolerSecret(); err != nil {
c.logger.Warningf("could not remove connection pooler secret: %v", err)
}
}
return reason, nil
}
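// Editor's note: an illustrative call, not part of the source diff. A typical
// invocation during Update passes the old and new manifests plus the lookup
// installer, exactly as cluster.go does above:
//
//	if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
//		c.logger.Errorf("could not sync connection pooler: %v", err)
//	}
//
// Disabling both poolers in newSpec makes the function delete the deployments,
// the services and, finally, the shared pooler secret.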
// Synchronize connection pooler resources. Effectively we're interested only in
// synchronizing the corresponding deployment, but if the deployment or
// service is missing, create it. After checking, also remember the objects for
// future reference.
func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) (
SyncReason, error) {
deployment, err := c.KubeClient.
Deployments(c.Namespace).
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})
if err != nil && k8sutil.ResourceNotFound(err) {
msg := "deployment %s for connection pooler synchronization is not found, create it"
c.logger.Warningf(msg, c.connectionPoolerName(role))
deploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
if err != nil {
msg = "could not generate deployment for connection pooler: %v"
return NoSync, fmt.Errorf(msg, err)
}
deployment, err := c.KubeClient.
Deployments(deploymentSpec.Namespace).
Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})
if err != nil {
return NoSync, err
}
c.ConnectionPooler[role].Deployment = deployment
} else if err != nil {
msg := "could not get connection pooler deployment to sync: %v"
return NoSync, fmt.Errorf(msg, err)
} else {
c.ConnectionPooler[role].Deployment = deployment
// actual synchronization
var oldConnectionPooler *acidv1.ConnectionPooler
if oldSpec != nil {
oldConnectionPooler = oldSpec.Spec.ConnectionPooler
}
newConnectionPooler := newSpec.Spec.ConnectionPooler
// The sync implementation below assumes that both old and new specs are
// not nil, but that can happen. To avoid any confusion, like updating a
// deployment because the specification changed from nil to an empty
// struct (that was initialized somewhere before), replace any nil with
// an empty spec.
if oldConnectionPooler == nil {
oldConnectionPooler = &acidv1.ConnectionPooler{}
}
if newConnectionPooler == nil {
newConnectionPooler = &acidv1.ConnectionPooler{}
}
c.logger.Infof("old: %+v, new %+v", oldConnectionPooler, newConnectionPooler)
var specSync bool
var specReason []string
if oldSpec != nil {
specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
}
defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment)
reason := append(specReason, defaultsReason...)
if specSync || defaultsSync {
c.logger.Infof("Update connection pooler deployment %s, reason: %+v",
c.connectionPoolerName(role), reason)
newDeploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
if err != nil {
msg := "could not generate deployment for connection pooler: %v"
return reason, fmt.Errorf(msg, err)
}
deployment, err := updateConnectionPoolerDeployment(c.KubeClient,
newDeploymentSpec)
if err != nil {
return reason, err
}
c.ConnectionPooler[role].Deployment = deployment
}
}
newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations))
if newAnnotations != nil {
deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations)
if err != nil {
return nil, err
}
c.ConnectionPooler[role].Deployment = deployment
}
service, err := c.KubeClient.
Services(c.Namespace).
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})
if err != nil && k8sutil.ResourceNotFound(err) {
msg := "Service %s for connection pooler synchronization is not found, create it"
c.logger.Warningf(msg, c.connectionPoolerName(role))
serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role])
service, err := c.KubeClient.
Services(serviceSpec.Namespace).
Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
if err != nil {
return NoSync, err
}
c.ConnectionPooler[role].Service = service
} else if err != nil {
msg := "could not get connection pooler service to sync: %v"
return NoSync, fmt.Errorf(msg, err)
} else {
// Service updates are not supported and probably not that useful anyway
c.ConnectionPooler[role].Service = service
}
return NoSync, nil
}

View File

@ -0,0 +1,45 @@
package cluster
import (
"testing"
"context"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/fake"
)
func TestFakeClient(t *testing.T) {
clientSet := fake.NewSimpleClientset()
namespace := "default"
l := labels.Set(map[string]string{
"application": "spilo",
})
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "my-deployment1",
Namespace: namespace,
Labels: l,
},
}
clientSet.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
deployment2, _ := clientSet.AppsV1().Deployments(namespace).Get(context.TODO(), "my-deployment1", metav1.GetOptions{})
if deployment.ObjectMeta.Name != deployment2.ObjectMeta.Name {
t.Errorf("Deployments are not equal")
}
deployments, _ := clientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "application=spilo"})
if len(deployments.Items) != 1 {
t.Errorf("Label search does not work")
}
}

View File

@ -0,0 +1,956 @@
package cluster
import (
"errors"
"fmt"
"strings"
"testing"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func mockInstallLookupFunction(schema string, user string, role PostgresRole) error {
return nil
}
func boolToPointer(value bool) *bool {
return &value
}
func int32ToPointer(value int32) *int32 {
return &value
}
func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
testName := "Test connection pooler creation"
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
NumberOfInstances: int32ToPointer(1),
},
},
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
},
}
cluster.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
}
reason, err := cluster.createConnectionPooler(mockInstallLookupFunction)
if err != nil {
t.Errorf("%s: Cannot create connection pooler, %s, %+v",
testName, err, reason)
}
for _, role := range [2]PostgresRole{Master, Replica} {
if cluster.ConnectionPooler[role] != nil {
if cluster.ConnectionPooler[role].Deployment == nil {
t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role)
}
if cluster.ConnectionPooler[role].Service == nil {
t.Errorf("%s: Connection pooler service is empty for role %s", testName, role)
}
}
}
oldSpec := &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
EnableReplicaConnectionPooler: boolToPointer(true),
},
}
newSpec := &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(false),
EnableReplicaConnectionPooler: boolToPointer(false),
},
}
// Delete connection pooler via sync
_, err = cluster.syncConnectionPooler(oldSpec, newSpec, mockInstallLookupFunction)
if err != nil {
t.Errorf("%s: Cannot sync connection pooler, %s", testName, err)
}
for _, role := range [2]PostgresRole{Master, Replica} {
err = cluster.deleteConnectionPooler(role)
if err != nil {
t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
}
}
}
func TestNeedConnectionPooler(t *testing.T) {
testName := "Test how connection pooler can be enabled"
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)
cluster.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if !needMasterConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Connection pooler is not enabled with full definition",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
}
if !needMasterConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Connection pooler is not enabled with flag",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(false),
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if needMasterConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Connection pooler is still enabled with flag being false",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if !needMasterConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Connection pooler is not enabled with flag and full",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(false),
EnableReplicaConnectionPooler: boolToPointer(false),
ConnectionPooler: nil,
}
if needMasterConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Connection pooler is enabled with flag false and nil",
testName)
}
// Test for replica connection pooler
cluster.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if needReplicaConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Replica Connection pooler is not enabled with full definition",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableReplicaConnectionPooler: boolToPointer(true),
}
if !needReplicaConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Replica Connection pooler is not enabled with flag",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableReplicaConnectionPooler: boolToPointer(false),
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if needReplicaConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Replica Connection pooler is still enabled with flag being false",
testName)
}
cluster.Spec = acidv1.PostgresSpec{
EnableReplicaConnectionPooler: boolToPointer(true),
ConnectionPooler: &acidv1.ConnectionPooler{},
}
if !needReplicaConnectionPooler(&cluster.Spec) {
t.Errorf("%s: Replica Connection pooler is not enabled with flag and full",
testName)
}
}
func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
for _, role := range [2]PostgresRole{Master, Replica} {
if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil &&
(cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil ||
*cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) {
return fmt.Errorf("Wrong number of instances")
}
}
return nil
}
func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler == nil {
return fmt.Errorf("Connection pooler resources are empty")
}
for _, role := range []PostgresRole{Master, Replica} {
if cluster.ConnectionPooler[role].Deployment == nil {
return fmt.Errorf("Deployment was not saved %s", role)
}
if cluster.ConnectionPooler[role].Service == nil {
return fmt.Errorf("Service was not saved %s", role)
}
}
return nil
}
func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler == nil {
return fmt.Errorf("Connection pooler resources are empty")
}
if cluster.ConnectionPooler[Master].Deployment == nil {
return fmt.Errorf("Deployment was not saved")
}
if cluster.ConnectionPooler[Master].Service == nil {
return fmt.Errorf("Service was not saved")
}
return nil
}
func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler == nil {
return fmt.Errorf("Connection pooler resources are empty")
}
if cluster.ConnectionPooler[Replica].Deployment == nil {
return fmt.Errorf("Deployment was not saved")
}
if cluster.ConnectionPooler[Replica].Service == nil {
return fmt.Errorf("Service was not saved")
}
return nil
}
func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
for _, role := range [2]PostgresRole{Master, Replica} {
if cluster.ConnectionPooler[role] != nil &&
(cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) {
return fmt.Errorf("Connection pooler was not deleted for role %v", role)
}
}
return nil
}
func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler[Master] != nil &&
(cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) {
return fmt.Errorf("Connection pooler master was not deleted")
}
return nil
}
func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler[Replica] != nil &&
(cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) {
return fmt.Errorf("Connection pooler replica was not deleted")
}
return nil
}
func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
for _, msg := range reason {
if strings.HasPrefix(msg, "update [] from '<nil>' to '") {
return fmt.Errorf("There is an empty reason, %s", msg)
}
}
return nil
}
func TestConnectionPoolerSynchronization(t *testing.T) {
testName := "Test connection pooler synchronization"
newCluster := func(client k8sutil.KubernetesClient) *Cluster {
return New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
NumberOfInstances: int32ToPointer(1),
},
},
}, client, acidv1.Postgresql{}, logger, eventRecorder)
}
cluster := newCluster(k8sutil.KubernetesClient{})
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
},
}
tests := []struct {
subTest string
oldSpec *acidv1.Postgresql
newSpec *acidv1.Postgresql
cluster *Cluster
defaultImage string
defaultInstances int32
check func(cluster *Cluster, err error, reason SyncReason) error
}{
{
subTest: "create from scratch",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
cluster: newCluster(k8sutil.ClientMissingObjects()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: MasterobjectsAreSaved,
},
{
subTest: "create if doesn't exist",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
cluster: newCluster(k8sutil.ClientMissingObjects()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: MasterobjectsAreSaved,
},
{
subTest: "create if doesn't exist with a flag",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
},
},
cluster: newCluster(k8sutil.ClientMissingObjects()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: MasterobjectsAreSaved,
},
{
subTest: "create no replica with flag",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableReplicaConnectionPooler: boolToPointer(false),
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreDeleted,
},
{
subTest: "create replica if doesn't exist with a flag",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: ReplicaobjectsAreSaved,
},
{
subTest: "create both master and replica",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
},
},
cluster: newCluster(k8sutil.ClientMissingObjects()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreSaved,
},
{
subTest: "delete only replica if not needed",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: OnlyReplicaDeleted,
},
{
subTest: "delete only master if not needed",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableConnectionPooler: boolToPointer(true),
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableReplicaConnectionPooler: boolToPointer(true),
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: OnlyMasterDeleted,
},
{
subTest: "delete if not needed",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreDeleted,
},
{
subTest: "cleanup if still there",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: objectsAreDeleted,
},
{
subTest: "update deployment",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{
NumberOfInstances: int32ToPointer(1),
},
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{
NumberOfInstances: int32ToPointer(2),
},
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: deploymentUpdated,
},
{
subTest: "update image from changed defaults",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:2.0",
defaultInstances: 2,
check: deploymentUpdated,
},
{
subTest: "there is no sync from nil to an empty spec",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: nil,
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
defaultImage: "pooler:1.0",
defaultInstances: 1,
check: noEmptySync,
},
}
for _, tt := range tests {
tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage
tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances =
int32ToPointer(tt.defaultInstances)
reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec,
tt.newSpec, mockInstallLookupFunction)
if err := tt.check(tt.cluster, err, reason); err != nil {
t.Errorf("%s [%s]: Could not synchronize, %+v",
testName, tt.subTest, err)
}
}
}
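// Each table entry above wires exactly one check function. Since all checks
// share a signature, a case that must assert several invariants at once could
// compose them; a small sketch (composeChecks is not part of the package):
func composeChecks(checks ...func(*Cluster, error, SyncReason) error) func(*Cluster, error, SyncReason) error {
	return func(cluster *Cluster, err error, reason SyncReason) error {
		for _, check := range checks {
			if checkErr := check(cluster, err, reason); checkErr != nil {
				return checkErr
			}
		}
		return nil
	}
}
// Usage in a table entry: check: composeChecks(objectsAreSaved, noEmptySync)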
func TestConnectionPoolerPodSpec(t *testing.T) {
testName := "Test connection pooler pod template generation"
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
MaxDBConnections: int32ToPointer(60),
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
}
var clusterNoDefaultRes = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
clusterNoDefaultRes.Spec = acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
}
noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil }
tests := []struct {
subTest string
spec *acidv1.PostgresSpec
expected error
cluster *Cluster
check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error
}{
{
subTest: "default configuration",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
expected: nil,
cluster: cluster,
check: noCheck,
},
{
subTest: "no default resources",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`),
cluster: clusterNoDefaultRes,
check: noCheck,
},
{
subTest: "default resources are set",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
expected: nil,
cluster: cluster,
check: testResources,
},
{
subTest: "labels for service",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
expected: nil,
cluster: cluster,
check: testLabels,
},
{
subTest: "required envs",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
expected: nil,
cluster: cluster,
check: testEnvs,
},
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, tt := range tests {
podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role)
if err != tt.expected && (err == nil || tt.expected == nil || err.Error() != tt.expected.Error()) {
t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
testName, tt.subTest, err, tt.expected)
}
err = tt.check(cluster, podSpec, role)
if err != nil {
t.Errorf("%s [%s]: Pod spec is incorrect, %+v",
testName, tt.subTest, err)
}
}
}
}
func TestConnectionPoolerDeploymentSpec(t *testing.T) {
testName := "Test connection pooler deployment spec generation"
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
},
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
Deployment: nil,
Service: nil,
LookupFunction: true,
Name: "",
Role: Master,
},
}
noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error {
return nil
}
tests := []struct {
subTest string
spec *acidv1.PostgresSpec
expected error
cluster *Cluster
check func(cluster *Cluster, deployment *appsv1.Deployment) error
}{
{
subTest: "default configuration",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
expected: nil,
cluster: cluster,
check: noCheck,
},
{
subTest: "owner reference",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
expected: nil,
cluster: cluster,
check: testDeploymentOwnerReference,
},
{
subTest: "selector",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
expected: nil,
cluster: cluster,
check: testSelector,
},
}
for _, tt := range tests {
deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master])
if err != tt.expected && (err == nil || tt.expected == nil || err.Error() != tt.expected.Error()) {
t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v",
testName, tt.subTest, err, tt.expected)
}
err = tt.check(cluster, deployment)
if err != nil {
t.Errorf("%s [%s]: Deployment spec is incorrect, %+v",
testName, tt.subTest, err)
}
}
}
func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
return fmt.Errorf("CPU request does not match, got %s, expected %s",
cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest)
}
memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"]
if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest {
return fmt.Errorf("Memory request does not match, got %s, expected %s",
memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest)
}
cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"]
if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit {
return fmt.Errorf("CPU limit does not match, got %s, expected %s",
cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit)
}
memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"]
if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit {
return fmt.Errorf("Memory limit does not match, got %s, expected %s",
memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit)
}
return nil
}
func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"]
if poolerLabels != cluster.connectionPoolerLabels(role, true).MatchLabels["connection-pooler"] {
return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabels(role, true).MatchLabels)
}
return nil
}
func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
labels := deployment.Spec.Selector.MatchLabels
expected := cluster.connectionPoolerLabels(Master, true).MatchLabels
if labels["connection-pooler"] != expected["connection-pooler"] {
return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",
labels, expected)
}
return nil
}
func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error {
selector := service.Spec.Selector
if selector["connection-pooler"] != cluster.connectionPoolerName(role) {
return fmt.Errorf("Selector is incorrect, got %s, expected %s",
selector["connection-pooler"], cluster.connectionPoolerName(role))
}
return nil
}
func TestConnectionPoolerServiceSpec(t *testing.T) {
testName := "Test connection pooler service spec generation"
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Statefulset = &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-sts",
},
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Master,
},
Replica: {
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Replica,
},
}
noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error {
return nil
}
tests := []struct {
subTest string
spec *acidv1.PostgresSpec
cluster *Cluster
check func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error
}{
{
subTest: "default configuration",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
cluster: cluster,
check: noCheck,
},
{
subTest: "owner reference",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
cluster: cluster,
check: testServiceOwnerReference,
},
{
subTest: "selector",
spec: &acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
EnableReplicaConnectionPooler: boolToPointer(true),
},
cluster: cluster,
check: testServiceSelector,
},
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, tt := range tests {
service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role])
if err := tt.check(cluster, service, role); err != nil {
t.Errorf("%s [%s]: Service spec is incorrect, %+v",
testName, tt.subTest, err)
}
}
}
}

View File

@@ -101,15 +101,20 @@ func (c *Cluster) databaseAccessDisabled() bool {
}
func (c *Cluster) initDbConn() error {
return c.initDbConnWithName("")
}
func (c *Cluster) initDbConnWithName(dbname string) error {
c.setProcessName("initializing db connection")
if c.pgDb != nil {
return nil
}
return c.initDbConnWithName("")
}
// Worker function for connection initialization. This function does not check
// if the connection is already open, if it is then it will be overwritten.
// Callers need to make sure no connection is open, otherwise we could leak
// connections
func (c *Cluster) initDbConnWithName(dbname string) error {
c.setProcessName("initializing db connection")
var conn *sql.DB
connstring := c.pgConnectionString(dbname)
@@ -126,12 +131,12 @@ func (c *Cluster) initDbConnWithName(dbname string) error {
}
if _, ok := err.(*net.OpError); ok {
c.logger.Errorf("could not connect to PostgreSQL database: %v", err)
c.logger.Warningf("could not connect to Postgres database: %v", err)
return false, nil
}
if err2 := conn.Close(); err2 != nil {
c.logger.Errorf("error when closing PostgreSQL connection after another error: %v", err)
c.logger.Errorf("error when closing Postgres connection after another error: %v", err)
return false, err2
}
@@ -145,6 +150,12 @@ func (c *Cluster) initDbConnWithName(dbname string) error {
conn.SetMaxOpenConns(1)
conn.SetMaxIdleConns(-1)
if c.pgDb != nil {
msg := "closing an existing connection before opening a new one to %s"
c.logger.Warningf(msg, dbname)
c.closeDbConn()
}
c.pgDb = conn
return nil
@@ -155,7 +166,7 @@ func (c *Cluster) connectionIsClosed() bool {
}
func (c *Cluster) closeDbConn() (err error) {
c.setProcessName("closing db connection")
c.setProcessName("closing database connection")
if c.pgDb != nil {
c.logger.Debug("closing database connection")
if err = c.pgDb.Close(); err != nil {
@@ -170,7 +181,7 @@ func (c *Cluster) closeDbConn() (err error) {
}
func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUserMap, err error) {
c.setProcessName("reading users from the db")
c.setProcessName("reading users from the database")
var rows *sql.Rows
users = make(spec.PgUserMap)
if rows, err = c.pgDb.Query(getUserSQL, pq.Array(userNames)); err != nil {
@@ -462,11 +473,14 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi
}
// Creates a connection pool credentials lookup function in every database to
// perform remote authentification.
// perform remote authentication.
func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error {
var stmtBytes bytes.Buffer
c.logger.Info("Installing lookup function")
// Open a new connection if not yet done. This connection will be used only
// to get the list of databases, not for the actual installation.
if err := c.initDbConn(); err != nil {
return fmt.Errorf("could not init database connection")
}
@@ -480,36 +494,40 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
}
}()
// List of databases we failed to process. At the moment it functions just
// like a flag to retry on the next sync, but in the future we may want to
// retry only necessary parts, so let's keep the list.
failedDatabases := []string{}
currentDatabases, err := c.getDatabases()
if err != nil {
msg := "could not get databases to install pooler lookup function: %v"
return fmt.Errorf(msg, err)
}
// We've got the list of target databases, now close this connection to
// open a new one to each of them.
if err := c.closeDbConn(); err != nil {
c.logger.Errorf("could not close database connection: %v", err)
}
templater := template.Must(template.New("sql").Parse(connectionPoolerLookup))
params := TemplateParams{
"pooler_schema": poolerSchema,
"pooler_user": poolerUser,
}
if err := templater.Execute(&stmtBytes, params); err != nil {
msg := "could not prepare sql statement %+v: %v"
return fmt.Errorf(msg, params, err)
}
for dbname := range currentDatabases {
if dbname == "template0" || dbname == "template1" {
continue
}
if err := c.initDbConnWithName(dbname); err != nil {
return fmt.Errorf("could not init database connection to %s", dbname)
}
c.logger.Infof("Install pooler lookup function into %s", dbname)
params := TemplateParams{
"pooler_schema": poolerSchema,
"pooler_user": poolerUser,
}
if err := templater.Execute(&stmtBytes, params); err != nil {
c.logger.Errorf("could not prepare sql statement %+v: %v",
params, err)
// process other databases
continue
}
c.logger.Infof("install pooler lookup function into database '%s'", dbname)
// golang sql will do retries couple of times if pq driver reports
// connections issues (driver.ErrBadConn), but since our query is
@@ -520,7 +538,20 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
constants.PostgresConnectTimeout,
constants.PostgresConnectRetryTimeout,
func() (bool, error) {
if _, err := c.pgDb.Exec(stmtBytes.String()); err != nil {
// At this moment we are not connected to any database
if err := c.initDbConnWithName(dbname); err != nil {
msg := "could not init database connection to %s"
return false, fmt.Errorf(msg, dbname)
}
defer func() {
if err := c.closeDbConn(); err != nil {
msg := "could not close database connection: %v"
c.logger.Errorf(msg, err)
}
}()
if _, err = c.pgDb.Exec(stmtBytes.String()); err != nil {
msg := fmt.Errorf("could not execute sql statement %s: %v",
stmtBytes.String(), err)
return false, msg
@@ -533,15 +564,15 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
c.logger.Errorf("could not execute after retries %s: %v",
stmtBytes.String(), err)
// process other databases
failedDatabases = append(failedDatabases, dbname)
continue
}
c.logger.Infof("pooler lookup function installed into %s", dbname)
if err := c.closeDbConn(); err != nil {
c.logger.Errorf("could not close database connection: %v", err)
}
}
c.ConnectionPooler.LookupFunction = true
if len(failedDatabases) == 0 {
c.ConnectionPooler[role].LookupFunction = true
}
return nil
}
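// The hunk above boils down to an open-execute-close cycle per database, with
// failures collected instead of aborting the loop. A simplified, hedged sketch
// of that flow (names invented; assumes database/sql is imported; the operator
// additionally wraps the Exec in its retry helper and skips template databases):
func execPerDatabase(connect func(dbname string) (*sql.DB, error), dbnames []string, stmt string) (failed []string) {
	for _, dbname := range dbnames {
		db, err := connect(dbname)
		if err != nil {
			failed = append(failed, dbname) // retry on the next sync
			continue
		}
		if _, err := db.Exec(stmt); err != nil {
			failed = append(failed, dbname)
		}
		db.Close() // one short-lived connection per database
	}
	return failed
}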

View File

@@ -7,6 +7,7 @@ import (
"path"
"sort"
"strconv"
"strings"
"github.com/sirupsen/logrus"
@@ -20,7 +21,6 @@ import (
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec"
pkgspec "github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
@@ -75,10 +75,6 @@ func (c *Cluster) statefulSetName() string {
return c.Name
}
func (c *Cluster) connectionPoolerName() string {
return c.Name + "-pooler"
}
func (c *Cluster) endpointName(role PostgresRole) string {
name := c.Name
if role == Replica {
@@ -142,26 +138,6 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
}
}
// Generate default resource section for connection pooler deployment, to be
// used if nothing custom is specified in the manifest
func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources {
config := c.OpConfig
defaultRequests := acidv1.ResourceDescription{
CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
}
defaultLimits := acidv1.ResourceDescription{
CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
}
return acidv1.Resources{
ResourceRequests: defaultRequests,
ResourceLimits: defaultLimits,
}
}
func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
var err error
@@ -213,7 +189,7 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD
return requests, nil
}
func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, logger *logrus.Entry) (string, error) {
func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) {
config := spiloConfiguration{}
config.Bootstrap = pgBootstrap{}
@@ -294,7 +270,14 @@ PatroniInitDBParams:
}
config.PgLocalConfiguration = make(map[string]interface{})
config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
// the newer and preferred way to specify the PG version is to use the `PGVERSION` env variable
// setting postgresql.bin_dir in the SPILO_CONFIGURATION still works and takes precedence over PGVERSION
// so we add postgresql.bin_dir only if PGVERSION is unused
// see PR 222 in Spilo
if !EnablePgVersionEnvVar {
config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
}
if len(pg.Parameters) > 0 {
local, bootstrap := getLocalAndBoostrapPostgreSQLParameters(pg.Parameters)
@@ -337,25 +320,39 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca
return
}
func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity {
matchExpressions := make([]v1.NodeSelectorRequirement, 0)
if len(nodeReadinessLabel) == 0 {
return nil
}
for k, v := range nodeReadinessLabel {
matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
Key: k,
Operator: v1.NodeSelectorOpIn,
Values: []string{v},
})
}
return &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: matchExpressions}},
},
},
}
}
func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
return nil
}
nodeAffinityCopy := *&v1.NodeAffinity{}
if nodeAffinity != nil {
nodeAffinityCopy = *nodeAffinity.DeepCopy()
}
if len(nodeReadinessLabel) > 0 {
matchExpressions := make([]v1.NodeSelectorRequirement, 0)
for k, v := range nodeReadinessLabel {
matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
Key: k,
Operator: v1.NodeSelectorOpIn,
Values: []string{v},
})
}
nodeReadinessSelectorTerm := v1.NodeSelectorTerm{MatchExpressions: matchExpressions}
if nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution == nil {
nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
nodeReadinessSelectorTerm,
},
}
} else {
nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: append(nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, nodeReadinessSelectorTerm),
}
}
}
return &v1.Affinity{
NodeAffinity: &nodeAffinityCopy,
}
}
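// A quick illustration of the merged behavior under assumed inputs (label
// keys and values are invented for the example):
func ExampleNodeAffinityMerge() {
	readiness := map[string]string{"lifecycle-status": "ready"}
	fromManifest := &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
				{MatchExpressions: []v1.NodeSelectorRequirement{
					{Key: "disktype", Operator: v1.NodeSelectorOpIn, Values: []string{"ssd"}},
				}},
			},
		},
	}
	merged := nodeAffinity(readiness, fromManifest)
	// The readiness-label term is appended to the manifest's terms; multiple
	// NodeSelectorTerms are OR-ed by the Kubernetes scheduler.
	fmt.Println(len(merged.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms))
	// Output: 2
}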
@@ -531,7 +528,7 @@ func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, su
},
},
}
mergedEnv := append(container.Env, env...)
mergedEnv := append(env, container.Env...)
container.Env = deduplicateEnvVars(mergedEnv, container.Name, logger)
result = append(result, container)
}
@@ -557,8 +554,11 @@ func (c *Cluster) generatePodTemplate(
initContainers []v1.Container,
sidecarContainers []v1.Container,
tolerationsSpec *[]v1.Toleration,
spiloRunAsUser *int64,
spiloRunAsGroup *int64,
spiloFSGroup *int64,
nodeAffinity *v1.Affinity,
schedulerName *string,
terminateGracePeriod int64,
podServiceAccountName string,
kubeIAMRole string,
@@ -576,6 +576,14 @@ func (c *Cluster) generatePodTemplate(
containers = append(containers, sidecarContainers...)
securityContext := v1.PodSecurityContext{}
if spiloRunAsUser != nil {
securityContext.RunAsUser = spiloRunAsUser
}
if spiloRunAsGroup != nil {
securityContext.RunAsGroup = spiloRunAsGroup
}
if spiloFSGroup != nil {
securityContext.FSGroup = spiloFSGroup
}
@@ -589,6 +597,10 @@ func (c *Cluster) generatePodTemplate(
SecurityContext: &securityContext,
}
if schedulerName != nil {
podSpec.SchedulerName = *schedulerName
}
if shmVolume != nil && *shmVolume {
addShmVolume(&podSpec)
}
@@ -705,6 +717,9 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
Value: c.OpConfig.PamRoleName,
},
}
if c.OpConfig.EnablePgVersionEnvVar {
envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.Spec.PgVersion})
}
// Spilo expects cluster labels as JSON
if clusterLabels, err := json.Marshal(labels.Set(c.OpConfig.ClusterLabels)); err != nil {
envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_LABELS", Value: labels.Set(c.OpConfig.ClusterLabels).String()})
@@ -714,17 +729,6 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
if spiloConfiguration != "" {
envVars = append(envVars, v1.EnvVar{Name: "SPILO_CONFIGURATION", Value: spiloConfiguration})
}
if c.OpConfig.WALES3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.LogS3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.patroniUsesKubernetes() {
envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
@@ -736,7 +740,7 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
}
if cloneDescription.ClusterName != "" {
if cloneDescription != nil && cloneDescription.ClusterName != "" {
envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
}
@@ -744,10 +748,34 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
}
// add vars taken from pod_environment_configmap and pod_environment_secret first
// (to allow them to override the globals set in the operator config)
if len(customPodEnvVarsList) > 0 {
envVars = append(envVars, customPodEnvVarsList...)
}
if c.OpConfig.WALES3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.WALGSBucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.GCPCredentials != "" {
envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
}
if c.OpConfig.LogS3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
}
return envVars
}
@@ -766,13 +794,81 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.
result = append(result, input[i])
} else if names[va.Name] == 1 {
names[va.Name]++
logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
va.Name, containerName)
// Some variables (those to configure the WAL_ and LOG_ shipping) may be overwritten, only log as info
if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") {
logger.Infof("global variable %q has been overwritten by configmap/secret for container %q",
va.Name, containerName)
} else {
logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
va.Name, containerName)
}
}
}
return result
}
// Return list of variables the pod received from the configured ConfigMap
func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
configMapPodEnvVarsList := make([]v1.EnvVar, 0)
if c.OpConfig.PodEnvironmentConfigMap.Name == "" {
return configMapPodEnvVarsList, nil
}
cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
context.TODO(),
c.OpConfig.PodEnvironmentConfigMap.Name,
metav1.GetOptions{})
if err != nil {
// if not found, try again using the cluster's namespace if it's different (old behavior)
if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
context.TODO(),
c.OpConfig.PodEnvironmentConfigMap.Name,
metav1.GetOptions{})
}
if err != nil {
return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
}
}
for k, v := range cm.Data {
configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
}
return configMapPodEnvVarsList, nil
}
// Return list of variables the pod received from the configured Secret
func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
secretPodEnvVarsList := make([]v1.EnvVar, 0)
if c.OpConfig.PodEnvironmentSecret == "" {
return secretPodEnvVarsList, nil
}
secret, err := c.KubeClient.Secrets(c.Namespace).Get(
context.TODO(),
c.OpConfig.PodEnvironmentSecret,
metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("could not read Secret PodEnvironmentSecretName: %v", err)
}
for k := range secret.Data {
secretPodEnvVarsList = append(secretPodEnvVarsList,
v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: c.OpConfig.PodEnvironmentSecret,
},
Key: k,
},
}})
}
return secretPodEnvVarsList, nil
}
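// With the ordering introduced above (custom vars appended before the
// operator's WAL_/LOG_ globals) and deduplicateEnvVars keeping the first
// occurrence of a name, a configmap or secret entry wins over the global
// default. A hedged sketch with invented values, assuming the surrounding
// package's deduplicateEnvVars and imports:
func ExampleCustomEnvPrecedence() {
	custom := []v1.EnvVar{{Name: "WAL_S3_BUCKET", Value: "team-bucket"}}
	global := []v1.EnvVar{{Name: "WAL_S3_BUCKET", Value: "operator-default"}}
	merged := append(custom, global...)
	deduped := deduplicateEnvVars(merged, "postgres", logrus.NewEntry(logrus.New()))
	fmt.Println(deduped[0].Value)
	// Output: team-bucket
}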
func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container {
name := sidecar.Name
if name == "" {
@@ -818,41 +914,6 @@ func extractPgVersionFromBinPath(binPath string, template string) (string, error
return fmt.Sprintf("%v", pgVersion), nil
}
func (c *Cluster) getNewPgVersion(container v1.Container, newPgVersion string) (string, error) {
var (
spiloConfiguration spiloConfiguration
runningPgVersion string
err error
)
for _, env := range container.Env {
if env.Name != "SPILO_CONFIGURATION" {
continue
}
err = json.Unmarshal([]byte(env.Value), &spiloConfiguration)
if err != nil {
return newPgVersion, err
}
}
if len(spiloConfiguration.PgLocalConfiguration) > 0 {
currentBinPath := fmt.Sprintf("%v", spiloConfiguration.PgLocalConfiguration[patroniPGBinariesParameterName])
runningPgVersion, err = extractPgVersionFromBinPath(currentBinPath, pgBinariesLocationTemplate)
if err != nil {
return "", fmt.Errorf("could not extract Postgres version from %v in SPILO_CONFIGURATION", currentBinPath)
}
} else {
return "", fmt.Errorf("could not find %q setting in SPILO_CONFIGURATION", patroniPGBinariesParameterName)
}
if runningPgVersion != newPgVersion {
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", runningPgVersion, newPgVersion)
newPgVersion = runningPgVersion
}
return newPgVersion, nil
}
func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
var (
@@ -932,32 +993,34 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
initContainers = spec.InitContainers
}
customPodEnvVarsList := make([]v1.EnvVar, 0)
if c.OpConfig.PodEnvironmentConfigMap != (pkgspec.NamespacedName{}) {
var cm *v1.ConfigMap
cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
context.TODO(),
c.OpConfig.PodEnvironmentConfigMap.Name,
metav1.GetOptions{})
if err != nil {
// if not found, try again using the cluster's namespace if it's different (old behavior)
if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
context.TODO(),
c.OpConfig.PodEnvironmentConfigMap.Name,
metav1.GetOptions{})
}
if err != nil {
return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
}
}
for k, v := range cm.Data {
customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
}
sort.Slice(customPodEnvVarsList,
func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
}
spiloCompathWalPathList := make([]v1.EnvVar, 0)
if c.OpConfig.EnableSpiloWalPathCompat {
spiloCompathWalPathList = append(spiloCompathWalPathList,
v1.EnvVar{
Name: "ENABLE_WAL_PATH_COMPAT",
Value: "true",
},
)
}
// fetch env vars from custom ConfigMap
configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables()
if err != nil {
return nil, err
}
// fetch env vars from custom Secret
secretEnvVarsList, err := c.getPodEnvironmentSecretVariables()
if err != nil {
return nil, err
}
// concat all custom pod env vars and sort them
customPodEnvVarsList := append(spiloCompathWalPathList, configMapEnvVarsList...)
customPodEnvVarsList = append(customPodEnvVarsList, secretEnvVarsList...)
sort.Slice(customPodEnvVarsList,
func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" {
return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
}
@@ -984,7 +1047,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}
}
spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.OpConfig.EnablePgVersionEnvVar, c.logger)
if err != nil {
return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err)
}
@@ -993,7 +1056,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
spiloEnvVars := c.generateSpiloPodEnvVars(
c.Postgresql.GetUID(),
spiloConfiguration,
&spec.Clone,
spec.Clone,
spec.StandbyCluster,
customPodEnvVarsList,
)
@@ -1001,7 +1064,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
// pickup the docker image for the spilo container
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
// determine the FSGroup for the spilo pod
// determine the User, Group and FSGroup for the spilo pod
effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
if spec.SpiloRunAsUser != nil {
effectiveRunAsUser = spec.SpiloRunAsUser
}
effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
if spec.SpiloRunAsGroup != nil {
effectiveRunAsGroup = spec.SpiloRunAsGroup
}
effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
if spec.SpiloFSGroup != nil {
effectiveFSGroup = spec.SpiloFSGroup
@@ -1065,7 +1138,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}
// generate the spilo container
c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
c.logger.Debugf("Generating Spilo container, environment variables")
c.logger.Debugf("%v", spiloEnvVars)
spiloContainer := generateContainer(c.containerName(),
&effectiveDockerImage,
resourceRequirements,
@@ -1134,19 +1209,22 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
annotations := c.generatePodAnnotations(spec)
podAnnotations := c.generatePodAnnotations(spec)
// generate pod template for the statefulset, based on the spilo container and sidecars
podTemplate, err = c.generatePodTemplate(
c.Namespace,
c.labelsSet(true),
annotations,
c.annotationsSet(podAnnotations),
spiloContainer,
initContainers,
sidecarContainers,
&tolerationSpec,
effectiveRunAsUser,
effectiveRunAsGroup,
effectiveFSGroup,
nodeAffinity(c.OpConfig.NodeReadinessLabel),
nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
spec.SchedulerName,
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole,
@@ -1183,15 +1261,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
}
annotations = make(map[string]string)
stsAnnotations := make(map[string]string)
annotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))
statefulSet := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: c.statefulSetName(),
Namespace: c.Namespace,
Labels: c.labelsSet(true),
Annotations: c.AnnotationsToPropagate(annotations),
Annotations: stsAnnotations,
},
Spec: appsv1.StatefulSetSpec{
Replicas: &numberOfInstances,
@@ -1484,9 +1563,10 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
username := pgUser.Name
secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: c.credentialSecretName(username),
Namespace: namespace,
Labels: c.labelsSet(true),
Annotations: c.annotationsSet(nil),
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
@@ -1547,6 +1627,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
}
c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
serviceSpec.Type = v1.ServiceTypeLoadBalancer
} else if role == Replica {
// before PR #258, the replica service was only created if allocated a LB
@@ -1559,7 +1640,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
Name: c.serviceName(role),
Namespace: c.Namespace,
Labels: c.roleLabelsSet(true, role),
Annotations: c.generateServiceAnnotations(role, spec),
Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)),
},
Spec: serviceSpec,
}
@@ -1657,11 +1738,31 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
msg := "Figure out which S3 bucket to use from env"
c.logger.Info(msg, description.S3WalPath)
if c.OpConfig.WALES3Bucket != "" {
envs := []v1.EnvVar{
{
Name: "CLONE_WAL_S3_BUCKET",
Value: c.OpConfig.WALES3Bucket,
},
}
result = append(result, envs...)
} else if c.OpConfig.WALGSBucket != "" {
envs := []v1.EnvVar{
{
Name: "CLONE_WAL_GS_BUCKET",
Value: c.OpConfig.WALGSBucket,
},
{
Name: "CLONE_GOOGLE_APPLICATION_CREDENTIALS",
Value: c.OpConfig.GCPCredentials,
},
}
result = append(result, envs...)
} else {
c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
}
envs := []v1.EnvVar{
{
Name: "CLONE_WAL_S3_BUCKET",
Value: c.OpConfig.WALES3Bucket,
},
{
Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX",
Value: getBucketScopeSuffix(description.UID),
@@ -1742,9 +1843,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget
return &policybeta1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: c.podDisruptionBudgetName(),
Namespace: c.Namespace,
Labels: c.labelsSet(true),
Annotations: c.annotationsSet(nil),
},
Spec: policybeta1.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
@@ -1824,7 +1926,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
[]v1.Container{},
&[]v1.Toleration{},
nil,
nodeAffinity(c.OpConfig.NodeReadinessLabel),
nil,
nil,
nodeAffinity(c.OpConfig.NodeReadinessLabel, nil),
nil,
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole,
@@ -1861,9 +1966,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
cronJob := &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: c.getLogicalBackupJobName(),
Namespace: c.Namespace,
Labels: c.labelsSet(true),
Annotations: c.annotationsSet(nil),
},
Spec: batchv1beta1.CronJobSpec{
Schedule: schedule,
@@ -1896,6 +2002,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
},
},
// Bucket env vars
{
Name: "LOGICAL_BACKUP_PROVIDER",
Value: c.OpConfig.LogicalBackup.LogicalBackupProvider,
},
{
Name: "LOGICAL_BACKUP_S3_BUCKET",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket,
@@ -1916,6 +2026,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX",
Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())),
},
{
Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS",
Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials,
},
// Postgres env vars
{
Name: "PG_VERSION",
@@ -1958,7 +2072,8 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey})
}
c.logger.Debugf("Generated logical backup env vars %v", envVars)
c.logger.Debugf("Generated logical backup env vars")
c.logger.Debugf("%v", envVars)
return envVars
}
@@ -1967,179 +2082,6 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) {
return "logical-backup-" + c.clusterName().Name
}
// Generate pool size related environment variables.
//
// MAX_DB_CONN would specify the global maximum for connections to a target
// database.
//
// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough.
//
// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when
// most of the queries coming through a connection pooler are from the same
// user to the same db). In case we want to spin up more connection pooler
// instances, take this into account and maintain the same number of
// connections.
//
// MIN_SIZE is the pool's minimal size, to prevent a situation when a sudden
// workload has to wait for new connections to be spun up.
//
// RESERVE_SIZE is how many additional connections to allow for a pooler.
func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {
effectiveMode := util.Coalesce(
spec.ConnectionPooler.Mode,
c.OpConfig.ConnectionPooler.Mode)
numberOfInstances := spec.ConnectionPooler.NumberOfInstances
if numberOfInstances == nil {
numberOfInstances = util.CoalesceInt32(
c.OpConfig.ConnectionPooler.NumberOfInstances,
k8sutil.Int32ToPointer(1))
}
effectiveMaxDBConn := util.CoalesceInt32(
spec.ConnectionPooler.MaxDBConnections,
c.OpConfig.ConnectionPooler.MaxDBConnections)
if effectiveMaxDBConn == nil {
effectiveMaxDBConn = k8sutil.Int32ToPointer(
constants.ConnectionPoolerMaxDBConnections)
}
maxDBConn := *effectiveMaxDBConn / *numberOfInstances
defaultSize := maxDBConn / 2
minSize := defaultSize / 2
reserveSize := minSize
return []v1.EnvVar{
{
Name: "CONNECTION_POOLER_PORT",
Value: fmt.Sprint(pgPort),
},
{
Name: "CONNECTION_POOLER_MODE",
Value: effectiveMode,
},
{
Name: "CONNECTION_POOLER_DEFAULT_SIZE",
Value: fmt.Sprint(defaultSize),
},
{
Name: "CONNECTION_POOLER_MIN_SIZE",
Value: fmt.Sprint(minSize),
},
{
Name: "CONNECTION_POOLER_RESERVE_SIZE",
Value: fmt.Sprint(reserveSize),
},
{
Name: "CONNECTION_POOLER_MAX_CLIENT_CONN",
Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections),
},
{
Name: "CONNECTION_POOLER_MAX_DB_CONN",
Value: fmt.Sprint(maxDBConn),
},
}
}
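// A worked example of the sizing arithmetic above, under assumed inputs
// (an effective MAX_DB_CONN of 60 split across two pooler instances):
func ExamplePoolerSizing() {
	effectiveMaxDBConn := int32(60)
	numberOfInstances := int32(2)
	maxDBConn := effectiveMaxDBConn / numberOfInstances // 30 connections per instance
	defaultSize := maxDBConn / 2                        // 15
	minSize := defaultSize / 2                          // 7 (integer division)
	reserveSize := minSize                              // 7
	fmt.Println(maxDBConn, defaultSize, minSize, reserveSize)
	// Output: 30 15 7 7
}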
func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) (
*v1.PodTemplateSpec, error) {
gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
resources, err := generateResourceRequirements(
spec.ConnectionPooler.Resources,
c.makeDefaultConnectionPoolerResources())
effectiveDockerImage := util.Coalesce(
spec.ConnectionPooler.DockerImage,
c.OpConfig.ConnectionPooler.Image)
effectiveSchema := util.Coalesce(
spec.ConnectionPooler.Schema,
c.OpConfig.ConnectionPooler.Schema)
if err != nil {
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
}
secretSelector := func(key string) *v1.SecretKeySelector {
effectiveUser := util.Coalesce(
spec.ConnectionPooler.User,
c.OpConfig.ConnectionPooler.User)
return &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: c.credentialSecretName(effectiveUser),
},
Key: key,
}
}
envVars := []v1.EnvVar{
{
Name: "PGHOST",
Value: c.serviceAddress(Master),
},
{
Name: "PGPORT",
Value: c.servicePort(Master),
},
{
Name: "PGUSER",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: secretSelector("username"),
},
},
// the convention is to use the same schema name as
// connection pooler username
{
Name: "PGSCHEMA",
Value: effectiveSchema,
},
{
Name: "PGPASSWORD",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: secretSelector("password"),
},
},
}
envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...)
poolerContainer := v1.Container{
Name: connectionPoolerContainer,
Image: effectiveDockerImage,
ImagePullPolicy: v1.PullIfNotPresent,
Resources: *resources,
Ports: []v1.ContainerPort{
{
ContainerPort: pgPort,
Protocol: v1.ProtocolTCP,
},
},
Env: envVars,
}
podTemplate := &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: c.connectionPoolerLabelsSelector().MatchLabels,
Namespace: c.Namespace,
Annotations: c.generatePodAnnotations(spec),
},
Spec: v1.PodSpec{
ServiceAccountName: c.OpConfig.PodServiceAccountName,
TerminationGracePeriodSeconds: &gracePeriod,
Containers: []v1.Container{poolerContainer},
// TODO: add tolerations to scheduler pooler on the same node
// as database
//Tolerations: *tolerationsSpec,
},
}
return podTemplate, nil
}
// Return an array of ownerReferences to make an arbitrary object dependent on
// the StatefulSet. Dependency is made on StatefulSet instead of PostgreSQL CRD
// while the former represents the actual state, and only its deletion means
// while the former is represent the actual state, and only it's deletion means // while the former is represent the actual state, and only it's deletion means
@ -2165,108 +2107,6 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference {
} }
} }
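// generateConnectionPoolerDeployment assembles the Deployment for the
// connection pooler. The replica count is taken from the cluster manifest,
// falls back to the operator configuration, and is raised to the configured
// minimum if necessary.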
func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) (
*appsv1.Deployment, error) {
	// There are two ways to enable a connection pooler: either specify a
	// connectionPooler section or set enableConnectionPooler. In the second
	// case spec.connectionPooler will be nil, so to make it easier to
	// calculate default values, initialize it to an empty structure. This
	// could be done anywhere, but here is the earliest common entry point
	// between the sync and create code paths, so init here.
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
	podTemplate, err := c.generateConnectionPoolerPodTemplate(spec)
	if err != nil {
		return nil, err
	}

	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
	if numberOfInstances == nil {
		numberOfInstances = util.CoalesceInt32(
			c.OpConfig.ConnectionPooler.NumberOfInstances,
			k8sutil.Int32ToPointer(1))
	}

	if *numberOfInstances < constants.ConnectionPoolerMinInstances {
		msg := "Adjusted number of connection pooler instances from %d to %d"
		// dereference the pointer so the value, not the address, is logged
		c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances)
		*numberOfInstances = constants.ConnectionPoolerMinInstances
	}
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: c.connectionPoolerName(),
Namespace: c.Namespace,
Labels: c.connectionPoolerLabelsSelector().MatchLabels,
Annotations: map[string]string{},
			// make the StatefulSet object the owner to represent the
			// dependency. The StatefulSet itself is deleted with the
			// "Orphaned" propagation policy, which means its deletion will
			// not clean up this deployment, but the hope is that this
			// object will be garbage collected if something went wrong and
			// the operator didn't delete it.
OwnerReferences: c.ownerReferences(),
},
Spec: appsv1.DeploymentSpec{
Replicas: numberOfInstances,
Selector: c.connectionPoolerLabelsSelector(),
Template: *podTemplate,
},
}
return deployment, nil
}
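// generateConnectionPoolerService builds the ClusterIP service that fronts
// the connection pooler pods: it listens on the Postgres port, targets the
// master service port, and selects pods via the "connection-pooler" label.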
func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service {
	// There are two ways to enable a connection pooler: either specify a
	// connectionPooler section or set enableConnectionPooler. In the second
	// case spec.connectionPooler will be nil, so to make it easier to
	// calculate default values, initialize it to an empty structure. This
	// could be done anywhere, but here is the earliest common entry point
	// between the sync and create code paths, so init here.
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
serviceSpec := v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Name: c.connectionPoolerName(),
Port: pgPort,
				// Type must be set to intstr.String, otherwise StrVal is ignored
				TargetPort: intstr.IntOrString{Type: intstr.String, StrVal: c.servicePort(Master)},
},
},
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{
"connection-pooler": c.connectionPoolerName(),
},
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: c.connectionPoolerName(),
Namespace: c.Namespace,
Labels: c.connectionPoolerLabelsSelector().MatchLabels,
Annotations: map[string]string{},
			// make the StatefulSet object the owner to represent the
			// dependency. The StatefulSet itself is deleted with the
			// "Orphaned" propagation policy, which means its deletion will
			// not clean up this service, but the hope is that this object
			// will be garbage collected if something went wrong and the
			// operator didn't delete it.
OwnerReferences: c.ownerReferences(),
},
Spec: serviceSpec,
}
return service
}
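// A minimal sketch of how the two generators above are expected to compose
// in the create path. This is illustration only, not part of this diff:
// createConnectionPooler and the typed KubeClient accessors are assumptions.
//
//	func (c *Cluster) createConnectionPooler() error {
//		deployment, err := c.generateConnectionPoolerDeployment(&c.Spec)
//		if err != nil {
//			return err
//		}
//		if _, err = c.KubeClient.Deployments(c.Namespace).Create(
//			context.TODO(), deployment, metav1.CreateOptions{}); err != nil {
//			return err
//		}
//		service := c.generateConnectionPoolerService(&c.Spec)
//		_, err = c.KubeClient.Services(c.Namespace).Create(
//			context.TODO(), service, metav1.CreateOptions{})
//		return err
//	}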
func ensurePath(file string, defaultDir string, defaultFile string) string {
	if file == "" {
		return path.Join(defaultDir, defaultFile)