commit f84252961e

@@ -0,0 +1,19 @@
+---
+name: Postgres Operator issue template
+about: How are you using the operator?
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Please answer some short questions which should help us to understand your problem / question better.
+
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.5.0
+- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
+- **Are you running Postgres Operator in production?** [yes | no]
+- **Type of issue?** [Bug report, question, feature request, etc.]
+
+Some general remarks when posting a bug report:
+- Please check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines please do it in a separate GitHub gist together with your Postgres CRD and configuration manifest.
+- If you feel this issue might be more related to the [Spilo](https://github.com/zalando/spilo/issues) docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening issues in the respective repos.
@@ -30,6 +30,7 @@ _testmain.go
 /docker/build/
 /github.com/
 .idea
+.vscode

 scm-source.json

@@ -47,6 +48,8 @@ __pycache__/

 # Distribution / packaging
 .Python
+ui/app/node_modules
+ui/operator_ui/static/build
 build/
 develop-eggs/
 dist/
@@ -18,5 +18,6 @@ install:

 script:
   - hack/verify-codegen.sh
-  - travis_wait 20 goveralls -service=travis-ci -package ./pkg/... -v
+  - travis_wait 20 go test -race -covermode atomic -coverprofile=profile.cov ./pkg/... -v
+  - goveralls -coverprofile=profile.cov -service=travis-ci -v
  - make e2e
Makefile
@@ -79,7 +79,7 @@ scm-source.json: .git

 tools:
 	GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck
-	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.16.3
+	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.18.8
 	GO111MODULE=on go mod tidy

 fmt:
@@ -97,4 +97,4 @@ test:
 	GO111MODULE=on go test ./...

 e2e: docker # build operator image to be tested
-	cd e2e; make tools e2etest clean
+	cd e2e; make e2etest
@@ -76,12 +76,6 @@ There is a browser-friendly version of this documentation at
 * [Postgres manifest reference](docs/reference/cluster_manifest.md)
 * [Command-line options and environment variables](docs/reference/command_line_and_environment.md)

-## Google Summer of Code
-
-The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)!
-Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019)
-and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).
-
 ## Community

 There are two places to get in touch with the community:
@@ -46,7 +46,7 @@ spec:
         - name: "RESOURCES_VISIBLE"
           value: "{{ .Values.envs.resourcesVisible }}"
         - name: "TARGET_NAMESPACE"
-          value: {{ .Values.envs.targetNamespace }}
+          value: "{{ .Values.envs.targetNamespace }}"
         - name: "TEAMS"
           value: |-
             [
@@ -117,6 +117,10 @@ spec:
           type: object
           additionalProperties:
             type: string
+        delete_annotation_date_key:
+          type: string
+        delete_annotation_name_key:
+          type: string
         downscaler_annotations:
           type: array
           items:
@@ -131,6 +135,32 @@ spec:
           type: boolean
         infrastructure_roles_secret_name:
           type: string
+        infrastructure_roles_secrets:
+          type: array
+          nullable: true
+          items:
+            type: object
+            required:
+              - secretname
+              - userkey
+              - passwordkey
+            properties:
+              secretname:
+                type: string
+              userkey:
+                type: string
+              passwordkey:
+                type: string
+              rolekey:
+                type: string
+              defaultuservalue:
+                type: string
+              defaultrolevalue:
+                type: string
+              details:
+                type: string
+              template:
+                type: boolean
         inherited_labels:
           type: array
           items:
@@ -149,6 +179,8 @@ spec:
           type: string
         pod_environment_configmap:
           type: string
+        pod_environment_secret:
+          type: string
         pod_management_policy:
           type: string
           enum:
@@ -168,6 +200,10 @@ spec:
           type: string
         secret_name_template:
           type: string
+        spilo_runasuser:
+          type: integer
+        spilo_runasgroup:
+          type: integer
         spilo_fsgroup:
           type: integer
         spilo_privileged:
@@ -227,6 +263,11 @@ spec:
           type: boolean
         enable_replica_load_balancer:
           type: boolean
+        external_traffic_policy:
+          type: string
+          enum:
+            - "Cluster"
+            - "Local"
         master_dns_name_format:
           type: string
         replica_dns_name_format:
@@ -374,6 +374,10 @@ spec:
           items:
             type: object
             additionalProperties: true
+        spiloRunAsUser:
+          type: integer
+        spiloRunAsGroup:
+          type: integer
         spiloFSGroup:
           type: integer
         standby:
@@ -9,6 +9,9 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
 data:
+{{- if .Values.podPriorityClassName }}
+  pod_priority_class_name: {{ .Values.podPriorityClassName }}
+{{- end }}
   pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
 {{ toYaml .Values.configGeneral | indent 2 }}
 {{ toYaml .Values.configUsers | indent 2 }}
@@ -37,6 +37,10 @@ spec:
         image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
         imagePullPolicy: {{ .Values.image.pullPolicy }}
         env:
+        {{- if .Values.enableJsonLogging }}
+        - name: ENABLE_JSON_LOGGING
+          value: "true"
+        {{- end }}
         {{- if eq .Values.configTarget "ConfigMap" }}
         - name: CONFIG_MAP_NAME
           value: {{ template "postgres-operator.fullname" . }}
@@ -13,6 +13,9 @@ configuration:
   users:
 {{ toYaml .Values.configUsers | indent 4 }}
   kubernetes:
+    {{- if .Values.podPriorityClassName }}
+    pod_priority_class_name: {{ .Values.podPriorityClassName }}
+    {{- end }}
     pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
 {{ toYaml .Values.configKubernetes | indent 4 }}
@@ -0,0 +1,15 @@
+{{- if .Values.podPriorityClassName }}
+apiVersion: scheduling.k8s.io/v1
+description: 'Use only for databases controlled by Postgres operator'
+kind: PriorityClass
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
+    helm.sh/chart: {{ template "postgres-operator.chart" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: {{ .Values.podPriorityClassName }}
+preemptionPolicy: PreemptLowerPriority
+globalDefault: false
+value: 1000000
+{{- end }}
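The PriorityClass above is only rendered when `podPriorityClassName` is set in the chart values. A minimal sketch of enabling it at install time; the release name and chart path are assumptions for illustration:

```bash
# Release name and chart location are assumptions; the class name is
# arbitrary but must satisfy the usual K8s name constraints.
helm install postgres-operator ./charts/postgres-operator \
  --set podPriorityClassName=postgres-pod-priority
```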
@@ -67,6 +67,12 @@ configKubernetes:
   # keya: valuea
   # keyb: valueb

+  # key name for annotation that compares manifest value with current date
+  # delete_annotation_date_key: "delete-date"
+
+  # key name for annotation that compares manifest value with cluster name
+  # delete_annotation_name_key: "delete-clustername"
+
   # list of annotations propagated from cluster manifest to statefulset and deployment
   # downscaler_annotations:
   # - deployment-time
@@ -104,6 +110,8 @@ configKubernetes:
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
   # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"

   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"
@@ -119,11 +127,16 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103

   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
+  # storage resize strategy, available options are: ebs, pvc, off
+  storage_resize_mode: ebs
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*" # listen to all namespaces

@@ -170,6 +183,8 @@ configLoadBalancer:
   enable_master_load_balancer: false
   # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: false
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: "{cluster}.{team}.{hostedzone}"
   # defines the DNS name string template for the replica load balancer cluster
@@ -271,7 +286,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-8"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode
@@ -305,8 +320,12 @@ podServiceAccount:
   # If not set a name is generated using the fullname template and "-pod" suffix
   name: "postgres-pod"

+# priority class for operator pod
 priorityClassName: ""

+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
   limits:
     cpu: 500m
@@ -15,6 +15,9 @@ podLabels: {}

 configTarget: "ConfigMap"

+# JSON logging format
+enableJsonLogging: false
+
 # general configuration parameters
 configGeneral:
   # choose if deployment creates/updates CRDs with OpenAPIV3Validation
@@ -63,6 +66,12 @@ configKubernetes:
   # annotations attached to each database pod
   # custom_pod_annotations: "keya:valuea,keyb:valueb"

+  # key name for annotation that compares manifest value with current date
+  # delete_annotation_date_key: "delete-date"
+
+  # key name for annotation that compares manifest value with cluster name
+  # delete_annotation_name_key: "delete-clustername"
+
   # list of annotations propagated from cluster manifest to statefulset and deployment
   # downscaler_annotations: "deployment-time,downscaler/*"
@@ -95,6 +104,8 @@ configKubernetes:
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
   # pod_environment_configmap: "default/my-custom-config"
+  # name of the Secret (in cluster namespace) with environment variables to populate on every pod
+  # pod_environment_secret: "my-custom-secret"

   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"
@@ -110,11 +121,16 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: "103"

   # whether the Spilo container should run in privileged mode
   spilo_privileged: "false"
+  # storage resize strategy, available options are: ebs, pvc, off
+  storage_resize_mode: ebs
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*" # listen to all namespaces

@@ -159,6 +175,8 @@ configLoadBalancer:
   enable_master_load_balancer: "false"
   # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: "false"
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: '{cluster}.{team}.{hostedzone}'
   # defines the DNS name string template for the replica load balancer cluster
@@ -263,7 +281,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-8"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: "60"
   # default pooling mode
@@ -297,8 +315,12 @@ podServiceAccount:
   # If not set a name is generated using the fullname template and "-pod" suffix
   name: "postgres-pod"

+# priority class for operator pod
 priorityClassName: ""

+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
   limits:
     cpu: 500m
@@ -2,7 +2,7 @@ package main

 import (
 	"flag"
-	"log"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"os/signal"
 	"sync"
@@ -36,6 +36,8 @@ func init() {
 	flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
 	flag.Parse()

+	config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
+
 	configMapRawName := os.Getenv("CONFIG_MAP_NAME")
 	if configMapRawName != "" {
@@ -63,6 +65,9 @@ func init() {
 func main() {
 	var err error

+	if config.EnableJsonLogging {
+		log.SetFormatter(&log.JSONFormatter{})
+	}
 	log.SetOutput(os.Stdout)
 	log.Printf("Spilo operator %s\n", version)
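The change above makes the operator switch logrus to its JSON formatter when `ENABLE_JSON_LOGGING` equals `true`. A hedged sketch of toggling it on an already deployed operator; the deployment name is an assumption:

```bash
# Assumes the operator runs as a deployment named "postgres-operator";
# kubectl set env triggers a rolling restart with the new variable.
kubectl set env deployment/postgres-operator ENABLE_JSON_LOGGING=true
```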
@@ -2,6 +2,10 @@ version: "2017-09-20"
 pipeline:
 - id: build-postgres-operator
   type: script
+  vm: large
+  cache:
+    paths:
+    - /go/pkg/mod
   commands:
   - desc: 'Update'
     cmd: |
@@ -12,7 +16,7 @@ pipeline:
   - desc: 'Install go'
     cmd: |
       cd /tmp
-      wget -q https://storage.googleapis.com/golang/go1.14.linux-amd64.tar.gz -O go.tar.gz
+      wget -q https://storage.googleapis.com/golang/go1.14.7.linux-amd64.tar.gz -O go.tar.gz
       tar -xf go.tar.gz
       mv go /usr/local
       ln -s /usr/local/go/bin/go /usr/bin/go
@@ -44,7 +44,7 @@ Once the validation is enabled it can only be disabled manually by editing or
 patching the CRD manifest:

 ```bash
-zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
+kubectl patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
 ```

 ## Non-default cluster domain
@@ -123,6 +123,68 @@ Every other Postgres cluster which lacks the annotation will be ignored by this
 operator. Conversely, operators without a defined `CONTROLLER_ID` will ignore
 clusters with defined ownership of another operator.

+## Delete protection via annotations
+
+To avoid accidental deletes of Postgres clusters the operator can check the
+manifest for two existing annotations containing the cluster name and/or the
+current date (in YYYY-MM-DD format). The name of the annotation keys can be
+defined in the configuration. By default, they are not set which disables the
+delete protection. Thus, one could choose to only go with one annotation.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  delete_annotation_date_key: "delete-date"
+  delete_annotation_name_key: "delete-clustername"
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    delete_annotation_date_key: "delete-date"
+    delete_annotation_name_key: "delete-clustername"
+```
+
+Now, every cluster manifest must contain the configured annotation keys to
+trigger the delete process when running `kubectl delete pg`. Note that the
+`Postgresql` resource would still get deleted as K8s' API server does not
+block it. Only the operator logs will tell that the delete criteria weren't
+met.
+
+**cluster manifest**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: demo-cluster
+  annotations:
+    delete-date: "2020-08-31"
+    delete-clustername: "demo-cluster"
+spec:
+  ...
+```
+
+In case the resource has been deleted accidentally or the annotations were
+simply forgotten, it's safe to recreate the cluster with `kubectl create`.
+Existing Postgres clusters are not replaced by the operator. But as the
+original cluster still exists the status will show `CreateFailed` at first.
+On the next sync event it should change to `Running`. However, as it is in
+fact a new resource for K8s, the UID will differ which can trigger a rolling
+update of the pods because the UID is used as part of backup path to S3.
+
 ## Role-based access control for the operator

 The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
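As a sketch of the documented flow, the annotations can be attached right before deletion; the keys and cluster name come from the examples above, and the date is a placeholder:

```bash
# Both annotations must match the keys configured in the operator,
# otherwise the operator will not act on the delete event.
kubectl annotate postgresql demo-cluster \
  delete-date="2020-08-31" \
  delete-clustername="demo-cluster"
kubectl delete postgresql demo-cluster
```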
@@ -319,11 +381,18 @@ spec:


 ## Custom Pod Environment Variables
-It is possible to configure a ConfigMap which is used by the Postgres pods as
+It is possible to configure a ConfigMap as well as a Secret which are used by the Postgres pods as
 an additional provider for environment variables. One use case is to customize
-the Spilo image and configure it with environment variables. The ConfigMap with
-the additional settings is referenced in the operator's main configuration.
+the Spilo image and configure it with environment variables. Another case could be to provide custom
+cloud provider or backup settings.
+
+In general the Operator will give preference to the globally configured variables, to not have the custom
+ones interfere with core functionality. Variables with the 'WAL_' and 'LOG_' prefix can be overwritten though, to allow
+backup and logshipping to be specified differently.
+
+
+### Via ConfigMap
+The ConfigMap with the additional settings is referenced in the operator's main configuration.
 A namespace can be specified along with the name. If left out, the configured
 default namespace of your K8s client will be used and if the ConfigMap is not
 found there, the Postgres cluster's namespace is taken when different:
|
|
@ -365,7 +434,54 @@ data:
|
||||||
MY_CUSTOM_VAR: value
|
MY_CUSTOM_VAR: value
|
||||||
```
|
```
|
||||||
|
|
||||||
This ConfigMap is then added as a source of environment variables to the
|
The key-value pairs of the ConfigMap are then added as environment variables to the
|
||||||
|
Postgres StatefulSet/pods.
|
||||||
|
|
||||||
|
|
||||||
|
### Via Secret
|
||||||
|
The Secret with the additional variables is referenced in the operator's main configuration.
|
||||||
|
To protect the values of the secret from being exposed in the pod spec they are each referenced
|
||||||
|
as SecretKeyRef.
|
||||||
|
This does not allow for the secret to be in a different namespace as the pods though
|
||||||
|
|
||||||
|
**postgres-operator ConfigMap**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: postgres-operator
|
||||||
|
data:
|
||||||
|
# referencing secret with custom environment variables
|
||||||
|
pod_environment_secret: postgres-pod-secrets
|
||||||
|
```
|
||||||
|
|
||||||
|
**OperatorConfiguration**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: "acid.zalan.do/v1"
|
||||||
|
kind: OperatorConfiguration
|
||||||
|
metadata:
|
||||||
|
name: postgresql-operator-configuration
|
||||||
|
configuration:
|
||||||
|
kubernetes:
|
||||||
|
# referencing secret with custom environment variables
|
||||||
|
pod_environment_secret: postgres-pod-secrets
|
||||||
|
```
|
||||||
|
|
||||||
|
**referenced Secret `postgres-pod-secrets`**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: postgres-pod-secrets
|
||||||
|
namespace: default
|
||||||
|
data:
|
||||||
|
MY_CUSTOM_VAR: dmFsdWU=
|
||||||
|
```
|
||||||
|
|
||||||
|
The key-value pairs of the Secret are all accessible as environment variables to the
|
||||||
Postgres StatefulSet/pods.
|
Postgres StatefulSet/pods.
|
||||||
|
|
||||||
## Limiting the number of min and max instances in clusters
|
## Limiting the number of min and max instances in clusters
|
||||||
|
|
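A sketch of creating the referenced Secret imperatively instead of applying the manifest above; kubectl base64-encodes the literal to the `dmFsdWU=` shown in the example:

```bash
kubectl create secret generic postgres-pod-secrets \
  --from-literal=MY_CUSTOM_VAR=value
```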
@@ -532,9 +648,9 @@ The configuration parameters that we will be using are:
 * `gcp_credentials`
 * `wal_gs_bucket`

-### Generate a K8 secret resource
+### Generate a K8s secret resource

-Generate the K8 secret resource that will contain your service account's
+Generate the K8s secret resource that will contain your service account's
 credentials. It's highly recommended to use a service account and limit its
 scope to just the WAL-E bucket.
@@ -565,7 +681,7 @@ aws_or_gcp:
   # log_s3_bucket: ""
   # wal_s3_bucket: ""
   wal_gs_bucket: "postgres-backups-bucket-28302F2" # name of bucket on where to save the WAL-E logs
-  gcp_credentials: "/var/secrets/google/key.json" # combination of the mount path & key in the K8 resource. (i.e. key.json)
+  gcp_credentials: "/var/secrets/google/key.json" # combination of the mount path & key in the K8s resource. (i.e. key.json)
   ...
 ```
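One hedged way to produce the secret that ends up mounted at `/var/secrets/google`; the secret name and the local key path are assumptions, and only the `key.json` key must match the file name referenced in `gcp_credentials`:

```bash
# Assumed secret name; key.json must match the key referenced by the
# gcp_credentials mount path.
kubectl create secret generic psql-wale-creds \
  --from-file=key.json=/path/to/service-account-key.json
```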
@@ -237,9 +237,11 @@ kubectl logs acid-minimal-cluster-0

 ## End-to-end tests

-The operator provides reference end-to-end tests (e2e) (as Docker image) to
-ensure various infrastructure parts work smoothly together. Each e2e execution
-tests a Postgres Operator image built from the current git branch. The test
+The operator provides reference end-to-end (e2e) tests to
+ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`.
+The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build.
+
+Each e2e execution tests a Postgres Operator image built from the current git branch. The test
 runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/),
 utilizes provided manifest examples, and runs e2e tests contained in the `tests`
 folder. The K8s API client in the container connects to the `kind` cluster via
@@ -160,7 +160,7 @@ You can now access the web interface by port forwarding the UI pod (mind the
 label selector) and enter `localhost:8081` in your browser:

 ```bash
-kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
+kubectl port-forward svc/postgres-operator-ui 8081:80
 ```

 Available options are explained in detail in the [UI docs](operator-ui.md).
@@ -65,6 +65,16 @@ These parameters are grouped directly under the `spec` key in the manifest.
   custom Docker image that overrides the **docker_image** operator parameter.
   It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.

+* **spiloRunAsUser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spiloRunAsGroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spiloFSGroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This will override the **spilo_fsgroup**
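A sketch of a cluster manifest using the new options; the UID/GID values mirror the commented defaults in the chart values and are assumptions for a stock Spilo image, as are the team, instance, volume and version settings:

```bash
# Minimal assumed manifest; spiloRunAsUser/spiloRunAsGroup are the new fields.
kubectl apply -f - <<EOF
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "12"
  spiloRunAsUser: 101
  spiloRunAsGroup: 103
EOF
```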
@@ -56,3 +56,7 @@ The following environment variables are accepted by the operator:
 * **CRD_READY_WAIT_INTERVAL**
   defines the interval between consecutive attempts waiting for the
   `postgresql` CRD to be created. The default is 5s.
+
+* **ENABLE_JSON_LOGGING**
+  Set to `true` for JSON formatted logging output.
+  The default is false.
@@ -200,6 +200,16 @@ configuration they are grouped under the `kubernetes` key.
   of a database created by the operator. If the annotation key is also provided
   by the database definition, the database definition value is used.

+* **delete_annotation_date_key**
+  key name for annotation that compares manifest value with current date in the
+  YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
+  The default is empty which also disables this delete protection check.
+
+* **delete_annotation_name_key**
+  key name for annotation that compares manifest value with Postgres cluster name.
+  Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is
+  empty which also disables this delete protection check.
+
 * **downscaler_annotations**
   An array of annotations that should be passed from Postgres CRD on to the
   statefulset and, if exists, to the connection pooler deployment as well.
|
||||||
teams API. The default is `postgresql-operator`.
|
teams API. The default is `postgresql-operator`.
|
||||||
|
|
||||||
* **infrastructure_roles_secret_name**
|
* **infrastructure_roles_secret_name**
|
||||||
namespaced name of the secret containing infrastructure roles names and
|
*deprecated*: namespaced name of the secret containing infrastructure roles
|
||||||
passwords.
|
with user names, passwords and role membership.
|
||||||
|
|
||||||
|
* **infrastructure_roles_secrets**
|
||||||
|
array of infrastructure role definitions which reference existing secrets
|
||||||
|
and specify the key names from which user name, password and role membership
|
||||||
|
are extracted. For the ConfigMap this has to be a string which allows
|
||||||
|
referencing only one infrastructure roles secret. The default is empty.
|
||||||
|
|
||||||
* **pod_role_label**
|
* **pod_role_label**
|
||||||
name of the label assigned to the Postgres pods (and services/endpoints) by
|
name of the label assigned to the Postgres pods (and services/endpoints) by
|
||||||
|
|
@@ -301,6 +317,16 @@ configuration they are grouped under the `kubernetes` key.
   that should be assigned to the Postgres pods. The priority class itself must
   be defined in advance. Default is empty (use the default priority class).

+* **spilo_runasuser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spilo_runasgroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spilo_fsgroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This is required to run Spilo as a
@@ -333,6 +359,12 @@ configuration they are grouped under the `kubernetes` key.
   of stateful sets of PG clusters. The default is `ordered_ready`, the second
   possible value is `parallel`.

+* **storage_resize_mode**
+  defines how the operator handles the difference between requested volume size and
+  actual size. Available options are: ebs - tries to resize EBS volume, pvc -
+  changes PVC definition, off - disables resize of the volumes. Default is "ebs".
+  When using OpenShift please use one of the other available options.
+
 ## Kubernetes resource requests

 This group allows you to configure resource requests for the Postgres pods.
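A sketch of switching the strategy on a ConfigMap-based setup; the ConfigMap name follows the examples in this changeset, and the operator typically has to be restarted to pick up the change:

```bash
# "pvc" makes the operator change the PVC definition instead of
# resizing the EBS volume directly.
kubectl patch configmap postgres-operator --type merge \
  -p '{"data":{"storage_resize_mode":"pvc"}}'
```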
@@ -402,6 +434,12 @@ CRD-based configuration.
 Those options affect the behavior of load balancers created by the operator.
 In the CRD-based configuration they are grouped under the `load_balancer` key.

+* **custom_service_annotations**
+  This key/value map provides a list of annotations that get attached to each
+  service of a cluster created by the operator. If the annotation key is also
+  provided by the cluster definition, the manifest value is used.
+  Optional.
+
 * **db_hosted_zone**
   DNS zone for the cluster DNS name when the load balancer is configured for
   the cluster. Only used when combined with
@@ -418,11 +456,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
   cluster. Can be overridden by individual cluster settings. The default is
   `false`.

-* **custom_service_annotations**
-  This key/value map provides a list of annotations that get attached to each
-  service of a cluster created by the operator. If the annotation key is also
-  provided by the cluster definition, the manifest value is used.
-  Optional.
+* **external_traffic_policy** defines external traffic policy for load
+  balancers. Allowed values are `Cluster` (default) and `Local`.

 * **master_dns_name_format** defines the DNS name string template for the
   master load balancer cluster. The default is
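For a ConfigMap-based configuration the new parameter can be set the same way; `Local` preserves client source IPs at the cost of less even load spreading. The ConfigMap name is the one used in the examples above:

```bash
# Allowed values per the CRD validation in this changeset: "Cluster" or "Local".
kubectl patch configmap postgres-operator --type merge \
  -p '{"data":{"external_traffic_policy":"Local"}}'
```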
docs/user.md
@@ -150,23 +150,62 @@ user. There are two ways to define them:

 #### Infrastructure roles secret

-The infrastructure roles secret is specified by the `infrastructure_roles_secret_name`
-parameter. The role definition looks like this (values are base64 encoded):
+Infrastructure roles can be specified by the `infrastructure_roles_secrets`
+parameter where you can reference multiple existing secrets. Prior to `v1.6.0`
+the operator could only reference one secret with the
+`infrastructure_roles_secret_name` option. However, this secret could contain
+multiple roles using the same set of keys plus incrementing index.

 ```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgresql-infrastructure-roles
+data:
   user1: ZGJ1c2Vy
   password1: c2VjcmV0
   inrole1: b3BlcmF0b3I=
+  user2: ...
 ```

 The block above describes the infrastructure role 'dbuser' with password
-'secret' that is a member of the 'operator' role. For the following definitions
-one must increase the index, i.e. the next role will be defined as 'user2' and
-so on. The resulting role will automatically be a login role.
+'secret' that is a member of the 'operator' role. The resulting role will
+automatically be a login role.

-Note that with definitions that solely use the infrastructure roles secret
-there is no way to specify role options (like superuser or nologin) or role
-memberships. This is where the ConfigMap comes into play.
+With the new option users can configure the names of secret keys that contain
+the user name, password etc. The secret itself is referenced by the
+`secretname` key. If the secret uses a template for multiple roles as described
+above, list them separately.
+
+```yaml
+apiVersion: v1
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    infrastructure_roles_secrets:
+    - secretname: "postgresql-infrastructure-roles"
+      userkey: "user1"
+      passwordkey: "password1"
+      rolekey: "inrole1"
+    - secretname: "postgresql-infrastructure-roles"
+      userkey: "user2"
+      ...
+```
+
+Note that only the CRD-based configuration allows for referencing multiple secrets.
+As of now, the ConfigMap is restricted to either one or the existing template
+option with `infrastructure_roles_secret_name`. Please refer to the example
+manifests to understand how `infrastructure_roles_secrets` has to be configured
+for the [configmap](../manifests/configmap.yaml) or [CRD configuration](../manifests/postgresql-operator-default-configuration.yaml).
+
+If both `infrastructure_roles_secret_name` and `infrastructure_roles_secrets`
+are defined the operator will create roles for both of them. So make sure
+they do not collide. Note also that with definitions that solely use the
+infrastructure roles secret there is no way to specify role options (like
+superuser or nologin) or role memberships. This is where the additional
+ConfigMap comes into play.

 #### Secret plus ConfigMap
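A sketch of creating the example secret imperatively; kubectl encodes the literals to the base64 values shown above (`dbuser`, `secret`, `operator`):

```bash
kubectl create secret generic postgresql-infrastructure-roles \
  --from-literal=user1=dbuser \
  --from-literal=password1=secret \
  --from-literal=inrole1=operator
```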
@@ -1,8 +1,12 @@
-FROM ubuntu:18.04
+# An image to run e2e tests.
+# The image does not include the tests; all necessary files are bind-mounted when a container starts.
+FROM ubuntu:20.04
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

-COPY manifests ./manifests
-COPY requirements.txt tests ./
+ENV TERM xterm-256color
+
+COPY requirements.txt ./
+COPY scm-source.json ./

 RUN apt-get update \
     && apt-get install --no-install-recommends -y \
@@ -11,13 +15,10 @@ RUN apt-get update \
        python3-pip \
        curl \
     && pip3 install --no-cache-dir -r requirements.txt \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \
     && chmod +x ./kubectl \
     && mv ./kubectl /usr/local/bin/kubectl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-ARG VERSION=dev
-RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" ./__init__.py
-
-CMD ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]
+ENTRYPOINT ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]
e2e/Makefile
@@ -1,6 +1,6 @@
 .PHONY: clean copy docker push tools test

-BINARY ?= postgres-operator-e2e-tests
+BINARY ?= postgres-operator-e2e-tests-runner
 BUILD_FLAGS ?= -v
 CGO_ENABLED ?= 0
 ifeq ($(RACE),1)
@@ -34,15 +34,20 @@ copy: clean
 	mkdir manifests
 	cp ../manifests -r .

-docker: copy
-	docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
+docker: scm-source.json
+	docker build -t "$(IMAGE):$(TAG)" .
+
+scm-source.json: ../.git
+	echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json

 push: docker
 	docker push "$(IMAGE):$(TAG)"

-tools: docker
+tools:
 	# install pinned version of 'kind'
-	GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1
+	# go get must run outside of a dir with a (module-based) Go project !
+	# otherwise go get updates project's dependencies and/or behaves differently
+	cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0

-e2etest:
-	./run.sh
+e2etest: tools copy clean
+	./run.sh main
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+kubectl exec -it $1 -- sh -c "$2"
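The helper simply wraps `kubectl exec`; a usage sketch against the example cluster pod mentioned elsewhere in this changeset, with an arbitrary command:

```bash
# First argument: pod name; second: command string executed via `sh -c`.
./exec.sh acid-minimal-cluster-0 "ps aux"
```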
@@ -1,5 +1,5 @@
 kind: Cluster
-apiVersion: kind.sigs.k8s.io/v1alpha3
+apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
 - role: worker
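The updated apiVersion matches kind v0.9.0, which the e2e Makefile now pins. The config is consumed by the same command run.sh issues:

```bash
kind create cluster --name postgres-operator-e2e-tests \
  --config kind-cluster-postgres-operator-e2e-tests.yaml
```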
@@ -1,3 +1,3 @@
-kubernetes==9.0.0
+kubernetes==11.0.0
 timeout_decorator==0.4.1
-pyyaml==5.1
+pyyaml==5.3.1
e2e/run.sh
@ -6,57 +6,67 @@ set -o nounset
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
IFS=$'\n\t'
|
IFS=$'\n\t'
|
||||||
|
|
||||||
cd $(dirname "$0");
|
|
||||||
|
|
||||||
readonly cluster_name="postgres-operator-e2e-tests"
|
readonly cluster_name="postgres-operator-e2e-tests"
|
||||||
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
|
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
|
||||||
|
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
|
||||||
|
|
||||||
|
echo "Clustername: ${cluster_name}"
|
||||||
|
echo "Kubeconfig path: ${kubeconfig_path}"
|
||||||
|
|
||||||
function pull_images(){
|
function pull_images(){
|
||||||
|
|
||||||
operator_tag=$(git describe --tags --always --dirty)
|
operator_tag=$(git describe --tags --always --dirty)
|
||||||
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
|
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
|
||||||
then
|
then
|
||||||
docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
|
docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
|
||||||
fi
|
fi
|
||||||
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:${operator_tag}) ]]
|
|
||||||
then
|
|
||||||
docker pull registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:latest
|
|
||||||
fi
|
|
||||||
|
|
||||||
operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
|
operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
|
||||||
e2e_test_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests" --format "{{.Repository}}:{{.Tag}}" | head -1)
|
|
||||||
|
# this image does not contain the tests; a container mounts them from a local "./tests" dir at start time
|
||||||
|
e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:latest"
|
||||||
|
docker pull ${e2e_test_runner_image}
|
||||||
}
|
}
|
||||||
|
|
||||||
function start_kind(){
|
function start_kind(){
|
||||||
|
echo "Starting kind for e2e tests"
|
||||||
# avoid interference with previous test runs
|
# avoid interference with previous test runs
|
||||||
if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
|
if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
|
||||||
then
|
then
|
||||||
kind delete cluster --name ${cluster_name}
|
kind delete cluster --name ${cluster_name}
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
export KUBECONFIG="${kubeconfig_path}"
|
||||||
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
|
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
|
||||||
kind load docker-image "${operator_image}" --name ${cluster_name}
|
kind load docker-image "${operator_image}" --name ${cluster_name}
|
||||||
kind load docker-image "${e2e_test_image}" --name ${cluster_name}
|
docker pull "${spilo_image}"
|
||||||
KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})"
|
kind load docker-image "${spilo_image}" --name ${cluster_name}
|
||||||
export KUBECONFIG
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function set_kind_api_server_ip(){
|
function set_kind_api_server_ip(){
|
||||||
|
echo "Setting up kind API server ip"
|
||||||
# use the actual kubeconfig to connect to the 'kind' API server
|
# use the actual kubeconfig to connect to the 'kind' API server
|
||||||
# but update the IP address of the API server to the one from the Docker 'bridge' network
|
# but update the IP address of the API server to the one from the Docker 'bridge' network
|
||||||
cp "${KUBECONFIG}" /tmp
|
|
||||||
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
|
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
|
||||||
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
|
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
|
||||||
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
|
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
|
||||||
}
|
}
|
||||||
|
|
||||||
 function run_tests(){
+  echo "Running tests..."

-  docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}"
+  # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile
+  docker run --rm --network=host -e "TERM=xterm-256color" \
+    --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
+    --mount type=bind,source="$(readlink -f manifests)",target=/manifests \
+    --mount type=bind,source="$(readlink -f tests)",target=/tests \
+    --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
+    -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}"
 }

 function clean_up(){
+  echo "Executing cleanup"

   unset KUBECONFIG
   kind delete cluster --name ${cluster_name}
   rm -rf ${kubeconfig_path}

@@ -66,11 +76,11 @@ function main(){

   trap "clean_up" QUIT TERM EXIT

-  pull_images
-  start_kind
-  set_kind_api_server_ip
+  time pull_images
+  time start_kind
+  time set_kind_api_server_ip
   run_tests
   exit 0
 }

 main "$@"

e2e/tests/test_e2e.py

@@ -1,3 +1,4 @@
+import json
 import unittest
 import time
 import timeout_decorator

@@ -6,6 +7,7 @@ import warnings
 import os
 import yaml

+from datetime import datetime
 from kubernetes import client, config

@@ -32,10 +34,14 @@ class EndToEndTestCase(unittest.TestCase):
         In the case of test failure the cluster will stay to enable manual examination;
         next invocation of "make test" will re-create it.
         '''
+        print("Test Setup being executed")

         # set a single K8s wrapper for all tests
         k8s = cls.k8s = K8s()

+        # remove existing local storage class and create hostpath class
+        k8s.api.storage_v1_api.delete_storage_class("standard")
+
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
         cls.namespace = "test"

@@ -50,7 +56,10 @@ class EndToEndTestCase(unittest.TestCase):
         for filename in ["operator-service-account-rbac.yaml",
                          "configmap.yaml",
-                         "postgres-operator.yaml"]:
+                         "postgres-operator.yaml",
+                         "infrastructure-roles.yaml",
+                         "infrastructure-roles-new.yaml",
+                         "e2e-storage-class.yaml"]:
             result = k8s.create_with_kubectl("manifests/" + filename)
             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))

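setUpClass now deletes kind's preinstalled "standard" storage class before applying the manifests, including the new manifests/e2e-storage-class.yaml. The contents of that manifest are not shown in this diff; purely as an illustration, a hostpath class could be created through the same wrapper like this (a hypothetical sketch, not the shipped manifest):

    sc = client.V1StorageClass(
        metadata=client.V1ObjectMeta(name="standard"),  # name assumed for illustration
        provisioner="kubernetes.io/host-path")
    k8s.api.storage_v1_api.create_storage_class(sc)
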
@@ -155,6 +164,7 @@ class EndToEndTestCase(unittest.TestCase):
         k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'

+        try:
         # enable load balancer services
         pg_patch_enable_lbs = {
             "spec": {

@@ -195,6 +205,57 @@ class EndToEndTestCase(unittest.TestCase):
             self.assertEqual(repl_svc_type, 'ClusterIP',
                              "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_infrastructure_roles(self):
+        '''
+        Test using external secrets for infrastructure roles
+        '''
+        k8s = self.k8s
+        # update infrastructure roles description
+        secret_name = "postgresql-infrastructure-roles"
+        roles = "secretname: postgresql-infrastructure-roles-new, \
+            userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
+        patch_infrastructure_roles = {
+            "data": {
+                "infrastructure_roles_secret_name": secret_name,
+                "infrastructure_roles_secrets": roles,
+            },
+        }
+        k8s.update_config(patch_infrastructure_roles)
+
+        # wait a little before proceeding
+        time.sleep(30)
+
+        try:
+            # check that new roles are represented in the config by requesting the
+            # operator configuration via API
+            operator_pod = k8s.get_operator_pod()
+            get_config_cmd = "wget --quiet -O - localhost:8080/config"
+            result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
+            roles_dict = (json.loads(result.stdout)
+                          .get("controller", {})
+                          .get("InfrastructureRoles"))
+
+            self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
+            role = roles_dict["robot_zmon_acid_monitoring_new"]
+            role.pop("Password", None)
+            self.assertDictEqual(role, {
+                "Name": "robot_zmon_acid_monitoring_new",
+                "Flags": None,
+                "MemberOf": ["robot_zmon"],
+                "Parameters": None,
+                "AdminRole": "",
+                "Origin": 2,
+            })
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

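The infrastructure_roles_secrets value above packs one role description into comma-separated "key: value" pairs. A minimal sketch of how such a string splits into a dict (illustrative only; the actual parsing happens in the operator's Go code):

    def parse_role_description(desc):
        # "secretname: x, userkey: user, ..." -> {"secretname": "x", "userkey": "user", ...}
        return {key.strip(): value.strip()
                for key, value in (pair.split(":", 1) for pair in desc.split(","))}
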
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_lazy_spilo_upgrade(self):
         '''

@@ -222,6 +283,7 @@ class EndToEndTestCase(unittest.TestCase):
         pod0 = 'acid-minimal-cluster-0'
         pod1 = 'acid-minimal-cluster-1'

+        try:
         # restart the pod to get a container with the new image
         k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
         time.sleep(60)

@@ -229,7 +291,8 @@ class EndToEndTestCase(unittest.TestCase):
         # lazy update works if the restarted pod and older pods run different Spilo versions
         new_image = k8s.get_effective_pod_image(pod0)
         old_image = k8s.get_effective_pod_image(pod1)
-        self.assertNotEqual(new_image, old_image, "Lazy updated failed: pods have the same image {}".format(new_image))
+        self.assertNotEqual(new_image, old_image,
+                            "Lazy updated failed: pods have the same image {}".format(new_image))

         # sanity check
         assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)

@@ -252,9 +315,14 @@ class EndToEndTestCase(unittest.TestCase):
         image0 = k8s.get_effective_pod_image(pod0)
         image1 = k8s.get_effective_pod_image(pod1)

-        assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
+        assert_msg = "Disabling lazy upgrade failed: pods still have different \
+            images {} and {}".format(image0, image1)
         self.assertEqual(image0, image1, assert_msg)

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_logical_backup_cron_job(self):
         '''

@@ -279,6 +347,8 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)

+        try:
         k8s.wait_for_logical_backup_job_creation()

         jobs = k8s.get_logical_backup_job().items

@@ -319,6 +389,10 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertEqual(0, len(jobs),
                          "Expected 0 logical backup jobs, found {}".format(len(jobs)))

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_min_resource_limits(self):
         '''

@@ -357,6 +431,8 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)

+        try:
         k8s.wait_for_pod_failover(failover_targets, labels)
         k8s.wait_for_pod_start('spilo-role=replica')

@@ -372,6 +448,10 @@ class EndToEndTestCase(unittest.TestCase):
                          "Expected memory limit {}, found {}"
                          .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
         '''

@@ -384,10 +464,15 @@ class EndToEndTestCase(unittest.TestCase):
         pg_manifest["metadata"]["namespace"] = self.namespace
         yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)

+        try:
         k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
         k8s.wait_for_pod_start("spilo-role=master", self.namespace)
         self.assert_master_is_unique(self.namespace, "acid-test-cluster")

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_node_readiness_label(self):
         '''

@@ -398,6 +483,7 @@ class EndToEndTestCase(unittest.TestCase):
         readiness_label = 'lifecycle-status'
         readiness_value = 'ready'

+        try:
         # get nodes of master and replica(s) (expected target of new master)
         current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
         num_replicas = len(current_replica_nodes)

@@ -433,6 +519,10 @@ class EndToEndTestCase(unittest.TestCase):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_scaling(self):
         '''

@@ -441,6 +531,7 @@ class EndToEndTestCase(unittest.TestCase):
         k8s = self.k8s
         labels = "application=spilo,cluster-name=acid-minimal-cluster"

+        try:
         k8s.wait_for_pg_to_scale(3)
         self.assertEqual(3, k8s.count_pods_with_label(labels))
         self.assert_master_is_unique()

@@ -449,6 +540,10 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertEqual(2, k8s.count_pods_with_label(labels))
         self.assert_master_is_unique()

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_service_annotations(self):
         '''

@@ -462,6 +557,7 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_custom_service_annotations)

+        try:
         pg_patch_custom_annotations = {
             "spec": {
                 "serviceAnnotations": {

@@ -484,6 +580,10 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertTrue(k8s.check_service_annotations(
             "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

         # clean up
         unpatch_custom_service_annotations = {
             "data": {

@@ -507,6 +607,7 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_sset_propagate_annotations)

+        try:
         pg_crd_annotations = {
             "metadata": {
                 "annotations": {

@@ -526,6 +627,10 @@ class EndToEndTestCase(unittest.TestCase):
         }
         self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
         '''

@@ -551,6 +656,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }

+        try:
         # patch node and test if master is failing over to one of the expected nodes
         k8s.api.core_v1.patch_node(current_master_node, body)
         new_master_node, new_replica_nodes = self.assert_failover(

@@ -570,17 +676,92 @@ class EndToEndTestCase(unittest.TestCase):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_x_cluster_deletion(self):
+        '''
+        Test deletion with configured protection
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        # configure delete protection
+        patch_delete_annotations = {
+            "data": {
+                "delete_annotation_date_key": "delete-date",
+                "delete_annotation_name_key": "delete-clustername"
+            }
+        }
+        k8s.update_config(patch_delete_annotations)
+
+        try:
+            # this delete attempt should be omitted because of missing annotations
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
+
+            # check that pods and services are still there
+            k8s.wait_for_running_pods(cluster_label, 2)
+            k8s.wait_for_service(cluster_label)
+
+            # recreate Postgres cluster resource
+            k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
+
+            # wait a little before proceeding
+            time.sleep(10)
+
+            # add annotations to manifest
+            delete_date = datetime.today().strftime('%Y-%m-%d')
+            pg_patch_delete_annotations = {
+                "metadata": {
+                    "annotations": {
+                        "delete-date": delete_date,
+                        "delete-clustername": "acid-minimal-cluster",
+                    }
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
+
+            # wait a little before proceeding
+            time.sleep(10)
+            k8s.wait_for_running_pods(cluster_label, 2)
+            k8s.wait_for_service(cluster_label)
+
+            # now delete process should be triggered
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
+
+            # wait until cluster is deleted
+            time.sleep(120)
+
+            # check if everything has been deleted
+            self.assertEqual(0, k8s.count_pods_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_services_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_endpoints_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_statefulsets_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_deployments_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_secrets_with_label(cluster_label))
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise

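The fixed time.sleep(120) above gives the operator time to tear everything down before the zero-count assertions run. A hedged alternative, sketched here only (not part of the committed test), is to poll with the same counting helpers until the pods disappear:

    deadline = time.time() + 120
    while time.time() < deadline and k8s.count_pods_with_label(cluster_label) > 0:
        time.sleep(5)
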
     def get_failover_targets(self, master_node, replica_nodes):
         '''
         If all pods live on the same node, failover will happen to other worker(s)
         '''
         k8s = self.k8s
+        k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'

         failover_targets = [x for x in replica_nodes if x != master_node]
         if len(failover_targets) == 0:
-            nodes = k8s.api.core_v1.list_node()
+            nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
             for n in nodes.items:
-                if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
+                if n.metadata.name != master_node:
                     failover_targets.append(n.metadata.name)

         return failover_targets

@@ -628,8 +809,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_enable_antiaffinity)
-        self.assert_failover(
-            master_node, len(replica_nodes), failover_targets, cluster_label)
+        self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)

         # now disable pod anti-affinity again which will cause yet another failover
         patch_disable_antiaffinity = {

@@ -656,11 +836,13 @@ class K8sApi:
         self.apps_v1 = client.AppsV1Api()
         self.batch_v1_beta1 = client.BatchV1beta1Api()
         self.custom_objects_api = client.CustomObjectsApi()
+        self.policy_v1_beta1 = client.PolicyV1beta1Api()
+        self.storage_v1_api = client.StorageV1Api()


 class K8s:
     '''
-    Wraps around K8 api client and helper methods.
+    Wraps around K8s api client and helper methods.
     '''

     RETRY_TIMEOUT_SEC = 10

@@ -711,14 +893,6 @@ class K8s:
         if pods:
             pod_phase = pods[0].status.phase

-            if pods and pod_phase != 'Running':
-                pod_name = pods[0].metadata.name
-                response = self.api.core_v1.read_namespaced_pod(
-                    name=pod_name,
-                    namespace=namespace
-                )
-                print("Pod description {}".format(response))
-
             time.sleep(self.RETRY_TIMEOUT_SEC)

     def get_service_type(self, svc_labels, namespace='default'):

@@ -780,6 +954,25 @@ class K8s:
     def count_pods_with_label(self, labels, namespace='default'):
         return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)

+    def count_services_with_label(self, labels, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items)
+
+    def count_endpoints_with_label(self, labels, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items)
+
+    def count_secrets_with_label(self, labels, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items)
+
+    def count_statefulsets_with_label(self, labels, namespace='default'):
+        return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items)
+
+    def count_deployments_with_label(self, labels, namespace='default'):
+        return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items)
+
+    def count_pdbs_with_label(self, labels, namespace='default'):
+        return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
+            namespace, label_selector=labels).items)

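These helpers back the zero-count assertions at the end of the deletion test; a condensed usage sketch with the label value used throughout the tests:

    label = 'application=spilo,cluster-name=acid-minimal-cluster'
    leftovers = {
        "pods": k8s.count_pods_with_label(label),
        "services": k8s.count_services_with_label(label),
        "statefulsets": k8s.count_statefulsets_with_label(label),
        "pdbs": k8s.count_pdbs_with_label(label),
    }
    assert all(count == 0 for count in leftovers.values())
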
     def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
         pod_phase = 'Failing over'
         new_pod_node = ''

@@ -820,6 +1013,11 @@ class K8s:
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)

+    def exec_with_kubectl(self, pod, cmd):
+        return subprocess.run(["./exec.sh", pod, cmd],
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+
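exec_with_kubectl shells out to the exec.sh wrapper that run_tests now mounts into the test-runner container; test_infrastructure_roles uses it to read the operator's config endpoint:

    pod = k8s.get_operator_pod()
    result = k8s.exec_with_kubectl(pod.metadata.name,
                                   "wget --quiet -O - localhost:8080/config")
    config = json.loads(result.stdout)
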
     def get_effective_pod_image(self, pod_name, namespace='default'):
         '''
         Get the Spilo image pod currently uses. In case of lazy rolling updates

go.mod

@@ -3,18 +3,18 @@ module github.com/zalando/postgres-operator

 go 1.14

 require (
-	github.com/aws/aws-sdk-go v1.32.2
+	github.com/aws/aws-sdk-go v1.34.10
-	github.com/lib/pq v1.7.0
+	github.com/lib/pq v1.8.0
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.6.0
 	github.com/stretchr/testify v1.5.1
-	golang.org/x/mod v0.3.0 // indirect
+	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9 // indirect
+	golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab // indirect
 	gopkg.in/yaml.v2 v2.2.8
-	k8s.io/api v0.18.3
+	k8s.io/api v0.18.8
-	k8s.io/apiextensions-apiserver v0.18.3
+	k8s.io/apiextensions-apiserver v0.18.0
-	k8s.io/apimachinery v0.18.3
+	k8s.io/apimachinery v0.18.8
-	k8s.io/client-go v0.18.3
+	k8s.io/client-go v0.18.8
-	k8s.io/code-generator v0.18.3
+	k8s.io/code-generator v0.18.8
 )

74
go.sum
74
go.sum
|
|
@ -26,8 +26,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
|
||||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||||
github.com/aws/aws-sdk-go v1.32.2 h1:X5/tQ4cuqCCUZgeOh41WFh9Eq5xe32JzWe4PSE2i1ME=
|
github.com/aws/aws-sdk-go v1.34.10 h1:VU78gcf/3wA4HNEDCHidK738l7K0Bals4SJnfnvXOtY=
|
||||||
github.com/aws/aws-sdk-go v1.32.2/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
github.com/aws/aws-sdk-go v1.34.10/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
|
|
@ -64,6 +64,7 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao
|
||||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
|
||||||
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
||||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||||
|
|
@ -136,7 +137,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
|
@ -145,7 +145,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
|
@ -155,7 +154,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
|
|
||||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||||
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
|
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
|
||||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||||
|
|
@ -175,11 +173,11 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
|
||||||
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
|
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
|
||||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
|
|
||||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
|
@ -198,8 +196,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
|
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
|
||||||
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
|
@ -270,7 +268,6 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
|
|
@ -279,7 +276,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||||
|
|
@ -291,7 +287,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
|
||||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||||
|
|
@ -308,17 +304,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||||
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
|
||||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
|
@ -339,8 +333,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||||
|
|
@ -351,6 +345,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
|
@ -363,18 +358,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||||
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
|
|
||||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
|
|
||||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
|
@@ -392,12 +386,12 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9 h1:cwgUY+1ja2qxWb2dyaCoixaA66WGWmrijSlxaM+JM/g=
-golang.org/x/tools v0.0.0-20200615222825-6aa8f57aacd9/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab h1:CyH2SDm5ATQiX9gtbMYfvNNed97A9v+TJFnUX/fTaJY=
+golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -428,26 +422,30 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0=
-k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
-k8s.io/apiextensions-apiserver v0.18.3 h1:h6oZO+iAgg0HjxmuNnguNdKNB9+wv3O1EBDdDWJViQ0=
-k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
-k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
-k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
-k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
-k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k=
-k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
-k8s.io/code-generator v0.18.3 h1:5H57pYEbkMMXCLKD16YQH3yDPAbVLweUsB1M3m70D1c=
-k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
-k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
+k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
+k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
+k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
+k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
+k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
+k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
+k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
+k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
+k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
+k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
+k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM=
+k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
+k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
+k8s.io/code-generator v0.18.8 h1:lgO1P1wjikEtzNvj7ia+x1VC4svJ28a/r0wnOLhhOTU=
+k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
+k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -455,6 +453,7 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
@@ -463,7 +462,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@@ -6,6 +6,8 @@ metadata:
 #  environment: demo
 #  annotations:
 #    "acid.zalan.do/controller": "second-operator"
+#    "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date" key is configured
+#    "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured
 spec:
   dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
   teamId: "acid"
@@ -66,6 +68,8 @@ spec:
 #     name: my-config-map

   enableShmVolume: true
+#  spiloRunAsUser: 101
+#  spiloRunAsGroup: 103
 #  spiloFSGroup: 103
 #  podAnnotations:
 #    annotation.key: value
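
The two annotations added above act as delete protection: they are only honored when the operator is configured with matching `delete_annotation_date_key` and `delete_annotation_name_key` options (see the ConfigMap changes further down). A minimal sketch of a protected cluster manifest, assuming the default key names shown in this commit; the cluster name and sizing values are placeholders:

    apiVersion: "acid.zalan.do/v1"
    kind: postgresql
    metadata:
      name: acid-test-cluster
      annotations:
        delete-date: "2020-08-31"
        delete-clustername: "acid-test-cluster"
    spec:
      teamId: "acid"
      numberOfInstances: 1
      volume:
        size: 1Gi
      postgresql:
        version: "12"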
@@ -15,7 +15,7 @@ data:
   # connection_pooler_default_cpu_request: "500m"
   # connection_pooler_default_memory_limit: 100Mi
   # connection_pooler_default_memory_request: 100Mi
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-8"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-11"
   # connection_pooler_max_db_connections: 60
   # connection_pooler_mode: "transaction"
   # connection_pooler_number_of_instances: 2
@@ -29,7 +29,9 @@ data:
   # default_cpu_request: 100m
   # default_memory_limit: 500Mi
   # default_memory_request: 100Mi
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
+  # delete_annotation_date_key: delete-date
+  # delete_annotation_name_key: delete-clustername
+  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p5
   # downscaler_annotations: "deployment-time,downscaler/*"
   # enable_admin_role_for_users: "true"
   # enable_crd_validation: "true"
@@ -45,9 +47,11 @@ data:
   # enable_team_superuser: "false"
   enable_teams_api: "false"
   # etcd_host: ""
+  external_traffic_policy: "Cluster"
   # gcp_credentials: ""
   # kubernetes_use_configmaps: "false"
-  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+  # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
+  # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
   # inherited_labels: application,environment
   # kube_iam_role: ""
   # log_s3_bucket: ""
@@ -74,8 +78,10 @@ data:
   # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   pod_deletion_wait_timeout: 10m
   # pod_environment_configmap: "default/my-custom-config"
+  # pod_environment_secret: "my-custom-secret"
   pod_label_wait_timeout: 10m
   pod_management_policy: "ordered_ready"
+  # pod_priority_class_name: "postgres-pod-priority"
   pod_role_label: spilo-role
   # pod_service_account_definition: ""
   pod_service_account_name: "postgres-pod"
@@ -95,8 +101,11 @@ data:
   secret_name_template: "{username}.{cluster}.credentials"
   # sidecar_docker_images: ""
   # set_memory_request_to_limit: "false"
+  # spilo_runasuser: 101
+  # spilo_runasgroup: 103
   # spilo_fsgroup: 103
   spilo_privileged: "false"
+  # storage_resize_mode: "off"
   super_username: postgres
   # team_admin_role: "admin"
   # team_api_role_configuration: "log_statement:all"
@@ -0,0 +1,8 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  namespace: kube-system
+  name: standard
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/host-path
@@ -0,0 +1,12 @@
+apiVersion: v1
+data:
+  # infrastructure role definition in the new format
+  # robot_zmon_acid_monitoring_new
+  user: cm9ib3Rfem1vbl9hY2lkX21vbml0b3JpbmdfbmV3
+  # foobar_new
+  password: Zm9vYmFyX25ldw==
+kind: Secret
+metadata:
+  name: postgresql-infrastructure-roles-new
+  namespace: default
+type: Opaque
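
The secret above carries a single infrastructure role in the new one-secret-per-role format. For the operator to load it, the secret has to be listed in the `infrastructure_roles_secrets` option introduced in this commit. A sketch of the wiring, assuming the ConfigMap-based operator configuration and the key names used in the manifest above:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: postgres-operator
    data:
      infrastructure_roles_secrets: "secretname:postgresql-infrastructure-roles-new,userkey:user,passwordkey:password"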
@@ -7,12 +7,14 @@ data:
   # provide other options in the configmap.
   # robot_zmon_acid_monitoring
   user1: cm9ib3Rfem1vbl9hY2lkX21vbml0b3Jpbmc=
+  # foobar
+  password1: Zm9vYmFy
   # robot_zmon
   inrole1: cm9ib3Rfem1vbg==
   # testuser
   user2: dGVzdHVzZXI=
-  # foobar
-  password2: Zm9vYmFy
+  # testpassword
+  password2: dGVzdHBhc3N3b3Jk
   # user batman with the password justice
   # look for other fields in the infrastructure roles configmap
   batman: anVzdGljZQ==
@@ -113,6 +113,10 @@ spec:
             type: object
             additionalProperties:
               type: string
+          delete_annotation_date_key:
+            type: string
+          delete_annotation_name_key:
+            type: string
           downscaler_annotations:
             type: array
             items:
@@ -127,6 +131,32 @@ spec:
             type: boolean
           infrastructure_roles_secret_name:
             type: string
+          infrastructure_roles_secrets:
+            type: array
+            nullable: true
+            items:
+              type: object
+              required:
+                - secretname
+                - userkey
+                - passwordkey
+              properties:
+                secretname:
+                  type: string
+                userkey:
+                  type: string
+                passwordkey:
+                  type: string
+                rolekey:
+                  type: string
+                defaultuservalue:
+                  type: string
+                defaultrolevalue:
+                  type: string
+                details:
+                  type: string
+                template:
+                  type: boolean
           inherited_labels:
             type: array
             items:
@@ -145,6 +175,8 @@ spec:
             type: string
           pod_environment_configmap:
             type: string
+          pod_environment_secret:
+            type: string
           pod_management_policy:
             type: string
             enum:
@@ -164,10 +196,20 @@ spec:
             type: string
           secret_name_template:
             type: string
+          spilo_runasuser:
+            type: integer
+          spilo_runasgroup:
+            type: integer
           spilo_fsgroup:
             type: integer
           spilo_privileged:
             type: boolean
+          storage_resize_mode:
+            type: string
+            enum:
+              - "ebs"
+              - "pvc"
+              - "off"
           toleration:
             type: object
             additionalProperties:
@@ -223,6 +265,11 @@ spec:
             type: boolean
           enable_replica_load_balancer:
             type: boolean
+          external_traffic_policy:
+            type: string
+            enum:
+              - "Cluster"
+              - "Local"
           master_dns_name_format:
             type: string
           replica_dns_name_format:
@@ -0,0 +1,11 @@
+apiVersion: scheduling.k8s.io/v1
+description: 'This priority class must be used only for databases controlled by the
+  Postgres operator'
+kind: PriorityClass
+metadata:
+  labels:
+    application: postgres-operator
+  name: postgres-pod-priority
+preemptionPolicy: PreemptLowerPriority
+globalDefault: false
+value: 1000000
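
A PriorityClass only affects Postgres pods once the operator is told to reference it. A sketch of the matching ConfigMap option, using the class name from the manifest above; CRD-based configurations expose the same setting as `pod_priority_class_name`:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: postgres-operator
    data:
      pod_priority_class_name: "postgres-pod-priority"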
@@ -31,6 +31,8 @@ configuration:
     # custom_pod_annotations:
     #   keya: valuea
     #   keyb: valueb
+    # delete_annotation_date_key: delete-date
+    # delete_annotation_name_key: delete-clustername
     # downscaler_annotations:
     #   - deployment-time
     #   - downscaler/*
@@ -39,6 +41,14 @@ configuration:
     enable_pod_disruption_budget: true
     enable_sidecars: true
     # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
+    # infrastructure_roles_secrets:
+    # - secretname: "monitoring-roles"
+    #   userkey: "user"
+    #   passwordkey: "password"
+    #   rolekey: "inrole"
+    # - secretname: "other-infrastructure-role"
+    #   userkey: "other-user-key"
+    #   passwordkey: "other-password-key"
     # inherited_labels:
     #   - application
     #   - environment
@@ -49,16 +59,20 @@ configuration:
     pdb_name_format: "postgres-{cluster}-pdb"
     pod_antiaffinity_topology_key: "kubernetes.io/hostname"
     # pod_environment_configmap: "default/my-custom-config"
+    # pod_environment_secret: "my-custom-secret"
     pod_management_policy: "ordered_ready"
-    # pod_priority_class_name: ""
+    # pod_priority_class_name: "postgres-pod-priority"
     pod_role_label: spilo-role
     # pod_service_account_definition: ""
     pod_service_account_name: postgres-pod
     # pod_service_account_role_binding_definition: ""
     pod_terminate_grace_period: 5m
     secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+    # spilo_runasuser: 101
+    # spilo_runasgroup: 103
     # spilo_fsgroup: 103
     spilo_privileged: false
+    storage_resize_mode: ebs
     # toleration: {}
     # watched_namespace: ""
   postgres_pod_resources:
@@ -76,12 +90,13 @@ configuration:
     resource_check_interval: 3s
     resource_check_timeout: 10m
   load_balancer:
-    # db_hosted_zone: ""
-    enable_master_load_balancer: false
-    enable_replica_load_balancer: false
     # custom_service_annotations:
     #   keyx: valuex
     #   keyy: valuey
+    # db_hosted_zone: ""
+    enable_master_load_balancer: false
+    enable_replica_load_balancer: false
+    external_traffic_policy: "Cluster"
     master_dns_name_format: "{cluster}.{team}.{hostedzone}"
     replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
   aws_or_gcp:
@@ -128,7 +143,7 @@ configuration:
     connection_pooler_default_cpu_request: "500m"
     connection_pooler_default_memory_limit: 100Mi
     connection_pooler_default_memory_request: 100Mi
-    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-8"
+    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
     # connection_pooler_max_db_connections: 60
     connection_pooler_mode: "transaction"
     connection_pooler_number_of_instances: 2
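
Two of the options added to this default configuration are enums: `storage_resize_mode` accepts ebs, pvc or off, and `external_traffic_policy` accepts Cluster or Local. A sketch of an OperatorConfiguration fragment overriding both defaults; the metadata name is a placeholder:

    apiVersion: "acid.zalan.do/v1"
    kind: OperatorConfiguration
    metadata:
      name: postgresql-operator-configuration
    configuration:
      kubernetes:
        storage_resize_mode: pvc
      load_balancer:
        external_traffic_policy: "Local"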
@@ -370,6 +370,10 @@ spec:
             items:
               type: object
               additionalProperties: true
+          spiloRunAsUser:
+            type: integer
+          spiloRunAsGroup:
+            type: integer
           spiloFSGroup:
             type: integer
           standby:
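
The new `spiloRunAsUser` and `spiloRunAsGroup` fields allow a single cluster to override the UID and GID the Spilo container runs with, alongside the existing `spiloFSGroup`. A sketch of a cluster manifest using them, with the example values 101 and 103 from the commented manifest earlier in this commit:

    apiVersion: "acid.zalan.do/v1"
    kind: postgresql
    metadata:
      name: acid-minimal-cluster
    spec:
      teamId: "acid"
      numberOfInstances: 2
      volume:
        size: 1Gi
      postgresql:
        version: "12"
      spiloRunAsUser: 101
      spiloRunAsGroup: 103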
@@ -519,6 +519,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
                },
            },
            },
+           "spiloRunAsUser": {
+               Type: "integer",
+           },
+           "spiloRunAsGroup": {
+               Type: "integer",
+           },
            "spiloFSGroup": {
                Type: "integer",
            },
@@ -888,6 +894,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
                },
            },
            },
+           "delete_annotation_date_key": {
+               Type: "string",
+           },
+           "delete_annotation_name_key": {
+               Type: "string",
+           },
            "downscaler_annotations": {
                Type: "array",
                Items: &apiextv1beta1.JSONSchemaPropsOrArray{
@@ -911,6 +923,41 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
            "infrastructure_roles_secret_name": {
                Type: "string",
            },
+           "infrastructure_roles_secrets": {
+               Type: "array",
+               Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+                   Schema: &apiextv1beta1.JSONSchemaProps{
+                       Type:     "object",
+                       Required: []string{"secretname", "userkey", "passwordkey"},
+                       Properties: map[string]apiextv1beta1.JSONSchemaProps{
+                           "secretname": {
+                               Type: "string",
+                           },
+                           "userkey": {
+                               Type: "string",
+                           },
+                           "passwordkey": {
+                               Type: "string",
+                           },
+                           "rolekey": {
+                               Type: "string",
+                           },
+                           "defaultuservalue": {
+                               Type: "string",
+                           },
+                           "defaultrolevalue": {
+                               Type: "string",
+                           },
+                           "details": {
+                               Type: "string",
+                           },
+                           "template": {
+                               Type: "boolean",
+                           },
+                       },
+                   },
+               },
+           },
            "inherited_labels": {
                Type: "array",
                Items: &apiextv1beta1.JSONSchemaPropsOrArray{
@@ -942,6 +989,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
            "pod_environment_configmap": {
                Type: "string",
            },
+           "pod_environment_secret": {
+               Type: "string",
+           },
            "pod_management_policy": {
                Type: "string",
                Enum: []apiextv1beta1.JSON{
@@ -974,12 +1024,32 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
            "secret_name_template": {
                Type: "string",
            },
+           "spilo_runasuser": {
+               Type: "integer",
+           },
+           "spilo_runasgroup": {
+               Type: "integer",
+           },
            "spilo_fsgroup": {
                Type: "integer",
            },
            "spilo_privileged": {
                Type: "boolean",
            },
+           "storage_resize_mode": {
+               Type: "string",
+               Enum: []apiextv1beta1.JSON{
+                   {
+                       Raw: []byte(`"ebs"`),
+                   },
+                   {
+                       Raw: []byte(`"pvc"`),
+                   },
+                   {
+                       Raw: []byte(`"off"`),
+                   },
+               },
+           },
            "toleration": {
                Type: "object",
                AdditionalProperties: &apiextv1beta1.JSONSchemaPropsOrBool{
@@ -1065,6 +1135,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
            "enable_replica_load_balancer": {
                Type: "boolean",
            },
+           "external_traffic_policy": {
+               Type: "string",
+               Enum: []apiextv1beta1.JSON{
+                   {
+                       Raw: []byte(`"Cluster"`),
+                   },
+                   {
+                       Raw: []byte(`"Local"`),
+                   },
+               },
+           },
            "master_dns_name_format": {
                Type: "string",
            },
@@ -112,8 +112,9 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {

    if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
        tmp2.Error = err.Error()
-       tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
-   } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil {
+       tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}
+   } else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil {
        tmp2.Error = err.Error()
        tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
    } else {
@@ -49,26 +49,33 @@ type KubernetesMetaConfiguration struct {
    PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"`
    PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"`
    SpiloPrivileged bool `json:"spilo_privileged,omitempty"`
+   SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
+   SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
    SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
    WatchedNamespace string `json:"watched_namespace,omitempty"`
    PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
    EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`
+   StorageResizeMode string `json:"storage_resize_mode,omitempty"`
    EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
    EnableSidecars *bool `json:"enable_sidecars,omitempty"`
    SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
    ClusterDomain string `json:"cluster_domain,omitempty"`
    OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
    InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
+   InfrastructureRolesDefs []*config.InfrastructureRole `json:"infrastructure_roles_secrets,omitempty"`
    PodRoleLabel string `json:"pod_role_label,omitempty"`
    ClusterLabels map[string]string `json:"cluster_labels,omitempty"`
    InheritedLabels []string `json:"inherited_labels,omitempty"`
    DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"`
    ClusterNameLabel string `json:"cluster_name_label,omitempty"`
+   DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"`
+   DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"`
    NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"`
    CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"`
    // TODO: use a proper toleration structure?
    PodToleration map[string]string `json:"toleration,omitempty"`
    PodEnvironmentConfigMap spec.NamespacedName `json:"pod_environment_configmap,omitempty"`
+   PodEnvironmentSecret string `json:"pod_environment_secret,omitempty"`
    PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
    MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"`
    EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
@@ -104,6 +111,7 @@ type LoadBalancerConfiguration struct {
    CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"`
    MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
    ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
+   ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"`
 }

 // AWSGCPConfiguration defines the configuration for AWS
@@ -197,8 +205,7 @@ type OperatorConfigurationData struct {
    RepairPeriod Duration `json:"repair_period,omitempty"`
    SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
    ShmVolume *bool `json:"enable_shm_volume,omitempty"`
-   // deprecated in favour of SidecarContainers
-   SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"`
+   SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
    SidecarContainers []v1.Container `json:"sidecars,omitempty"`
    PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
    Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`
@@ -35,6 +35,8 @@ type PostgresSpec struct {
    TeamID string `json:"teamId"`
    DockerImage string `json:"dockerImage,omitempty"`

+   SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"`
+   SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
    SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`

    // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
@@ -53,7 +55,7 @@ type PostgresSpec struct {
    NumberOfInstances int32 `json:"numberOfInstances"`
    Users map[string]UserFlags `json:"users"`
    MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
-   Clone CloneDescription `json:"clone"`
+   Clone *CloneDescription `json:"clone,omitempty"`
    ClusterName string `json:"-"`
    Databases map[string]string `json:"databases,omitempty"`
    PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
@@ -64,10 +66,10 @@ type PostgresSpec struct {
    ShmVolume *bool `json:"enableShmVolume,omitempty"`
    EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
    LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
-   StandbyCluster *StandbyDescription `json:"standby"`
-   PodAnnotations map[string]string `json:"podAnnotations"`
-   ServiceAnnotations map[string]string `json:"serviceAnnotations"`
-   TLS *TLSDescription `json:"tls"`
+   StandbyCluster *StandbyDescription `json:"standby,omitempty"`
+   PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
+   ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
+   TLS *TLSDescription `json:"tls,omitempty"`
    AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`

    // deprecated json tags
|
|
@ -72,7 +72,7 @@ func extractClusterName(clusterName string, teamName string) (string, error) {
|
||||||
|
|
||||||
func validateCloneClusterDescription(clone *CloneDescription) error {
|
func validateCloneClusterDescription(clone *CloneDescription) error {
|
||||||
// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
|
// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
|
||||||
if clone.ClusterName != "" && clone.EndTimestamp == "" {
|
if clone != nil && clone.ClusterName != "" && clone.EndTimestamp == "" {
|
||||||
if !serviceNameRegex.MatchString(clone.ClusterName) {
|
if !serviceNameRegex.MatchString(clone.ClusterName) {
|
||||||
return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
|
return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
|
||||||
serviceNameRegexString)
|
serviceNameRegexString)
|
||||||
|
|
|
||||||
|
|
@@ -163,7 +163,7 @@ var unmarshalCluster = []struct {
            "kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
            "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
        },
-       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`),
+       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`),
        err: nil},
    {
        about: "example with /status subresource",
@@ -184,7 +184,7 @@ var unmarshalCluster = []struct {
            "kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
            "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
        },
-       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
        err: nil},
    {
        about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
@@ -327,7 +327,7 @@ var unmarshalCluster = []struct {
                    EndTime: mustParseTime("05:15"),
                },
            },
-           Clone: CloneDescription{
+           Clone: &CloneDescription{
                ClusterName: "acid-batman",
            },
            ClusterName: "testcluster1",
@@ -351,7 +351,7 @@ var unmarshalCluster = []struct {
            Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
            Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
        },
-       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+       marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
        err: nil},
    {
        about: "example with clone",
@@ -366,7 +366,7 @@ var unmarshalCluster = []struct {
        },
        Spec: PostgresSpec{
            TeamID: "acid",
-           Clone: CloneDescription{
+           Clone: &CloneDescription{
                ClusterName: "team-batman",
            },
            ClusterName: "testcluster1",
@@ -405,7 +405,7 @@ var unmarshalCluster = []struct {
        err: errors.New("unexpected end of JSON input")},
    {
        about: "expect error on JSON with field's value malformatted",
-       in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
+       in: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
        out: Postgresql{},
        marshal: []byte{},
        err: errors.New("invalid character 'q' looking for beginning of value"),
|
@ -27,6 +27,7 @@ SOFTWARE.
|
||||||
package v1
|
package v1
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
config "github.com/zalando/postgres-operator/pkg/util/config"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||||
)
|
)
|
||||||
|
|
@ -146,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
|
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
if in.SpiloRunAsUser != nil {
|
||||||
|
in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.SpiloRunAsGroup != nil {
|
||||||
|
in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
if in.SpiloFSGroup != nil {
|
if in.SpiloFSGroup != nil {
|
||||||
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
|
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
|
||||||
*out = new(int64)
|
*out = new(int64)
|
||||||
|
|
@ -168,6 +179,17 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
|
||||||
}
|
}
|
||||||
out.OAuthTokenSecretName = in.OAuthTokenSecretName
|
out.OAuthTokenSecretName = in.OAuthTokenSecretName
|
||||||
out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
|
out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
|
||||||
|
if in.InfrastructureRolesDefs != nil {
|
||||||
|
in, out := &in.InfrastructureRolesDefs, &out.InfrastructureRolesDefs
|
||||||
|
*out = make([]*config.InfrastructureRole, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
if (*in)[i] != nil {
|
||||||
|
in, out := &(*in)[i], &(*out)[i]
|
||||||
|
*out = new(config.InfrastructureRole)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if in.ClusterLabels != nil {
|
if in.ClusterLabels != nil {
|
||||||
in, out := &in.ClusterLabels, &out.ClusterLabels
|
in, out := &in.ClusterLabels, &out.ClusterLabels
|
||||||
*out = make(map[string]string, len(*in))
|
*out = make(map[string]string, len(*in))
|
||||||
|
|
@@ -515,6 +537,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
        *out = new(ConnectionPooler)
        (*in).DeepCopyInto(*out)
    }
+   if in.SpiloRunAsUser != nil {
+       in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
+       *out = new(int64)
+       **out = **in
+   }
+   if in.SpiloRunAsGroup != nil {
+       in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
+       *out = new(int64)
+       **out = **in
+   }
    if in.SpiloFSGroup != nil {
        in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
        *out = new(int64)
@@ -567,7 +599,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
-   in.Clone.DeepCopyInto(&out.Clone)
+   if in.Clone != nil {
+       in, out := &in.Clone, &out.Clone
+       *out = new(CloneDescription)
+       (*in).DeepCopyInto(*out)
+   }
    if in.Databases != nil {
        in, out := &in.Databases, &out.Databases
        *out = make(map[string]string, len(*in))
@ -124,6 +124,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
|
||||||
|
|
||||||
return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
|
return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
|
||||||
})
|
})
|
||||||
|
password_encryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"]
|
||||||
|
if !ok {
|
||||||
|
password_encryption = "md5"
|
||||||
|
}
|
||||||
|
|
||||||
cluster := &Cluster{
|
cluster := &Cluster{
|
||||||
Config: cfg,
|
Config: cfg,
|
||||||
|
|
@ -135,7 +139,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
|
||||||
Secrets: make(map[types.UID]*v1.Secret),
|
Secrets: make(map[types.UID]*v1.Secret),
|
||||||
Services: make(map[PostgresRole]*v1.Service),
|
Services: make(map[PostgresRole]*v1.Service),
|
||||||
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
|
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
|
||||||
userSyncStrategy: users.DefaultUserSyncStrategy{},
|
userSyncStrategy: users.DefaultUserSyncStrategy{password_encryption},
|
||||||
deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
|
deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
|
||||||
podEventsQueue: podEventsQueue,
|
podEventsQueue: podEventsQueue,
|
||||||
KubeClient: kubeClient,
|
KubeClient: kubeClient,
|
||||||
|
|
@ -455,6 +459,15 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// we assume any change in priority happens by rolling out a new priority class
|
||||||
|
// changing the priority value in an existing class is not supproted
|
||||||
|
if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
|
||||||
|
match = false
|
||||||
|
needsReplace = true
|
||||||
|
needsRollUpdate = true
|
||||||
|
reasons = append(reasons, "new statefulset's pod priority class in spec doesn't match the current one")
|
||||||
|
}
|
||||||
|
|
||||||
// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
|
// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
|
||||||
// until they are re-created for other reasons, for example node rotation
|
// until they are re-created for other reasons, for example node rotation
|
||||||
if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
|
if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
|
||||||
|
|
@ -797,10 +810,8 @@ func (c *Cluster) Delete() {
|
||||||
c.logger.Warningf("could not delete statefulset: %v", err)
|
c.logger.Warningf("could not delete statefulset: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, obj := range c.Secrets {
|
if err := c.deleteSecrets(); err != nil {
|
||||||
if err := c.deleteSecret(obj); err != nil {
|
c.logger.Warningf("could not delete secrets: %v", err)
|
||||||
c.logger.Warningf("could not delete secret: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := c.deletePodDisruptionBudget(); err != nil {
|
if err := c.deletePodDisruptionBudget(); err != nil {
|
||||||
|
|
@@ -957,32 +968,42 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
 	}
 
 	for preparedDbName, preparedDB := range c.Spec.PreparedDatabases {
+		// get list of prepared schemas to set in search_path
+		preparedSchemas := preparedDB.PreparedSchemas
+		if len(preparedDB.PreparedSchemas) == 0 {
+			preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
+		}
+
+		var searchPath strings.Builder
+		searchPath.WriteString(constants.DefaultSearchPath)
+		for preparedSchemaName := range preparedSchemas {
+			searchPath.WriteString(", " + preparedSchemaName)
+		}
+
 		// default roles per database
-		if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName); err != nil {
+		if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil {
 			return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
 		}
 		if preparedDB.DefaultUsers {
-			if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName); err != nil {
+			if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil {
 				return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
 			}
 		}
 
 		// default roles per database schema
-		preparedSchemas := preparedDB.PreparedSchemas
-		if len(preparedDB.PreparedSchemas) == 0 {
-			preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
-		}
 		for preparedSchemaName, preparedSchema := range preparedSchemas {
 			if preparedSchema.DefaultRoles == nil || *preparedSchema.DefaultRoles {
 				if err := c.initDefaultRoles(defaultRoles,
 					preparedDbName+constants.OwnerRoleNameSuffix,
-					preparedDbName+"_"+preparedSchemaName); err != nil {
+					preparedDbName+"_"+preparedSchemaName,
+					constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
 					return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
 				}
 			}
 			if preparedSchema.DefaultUsers {
 				if err := c.initDefaultRoles(defaultUsers,
 					preparedDbName+constants.OwnerRoleNameSuffix,
-					preparedDbName+"_"+preparedSchemaName); err != nil {
+					preparedDbName+"_"+preparedSchemaName,
+					constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
 					return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
 				}
 			}
 		}
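
Database-level roles get a `search_path` covering every prepared schema, built with a `strings.Builder`. A runnable sketch of that string building; `defaultSearchPath` stands in for `constants.DefaultSearchPath`, whose real value lives in pkg/util/constants and is not shown in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

// defaultSearchPath is a stand-in for constants.DefaultSearchPath.
const defaultSearchPath = `"$user"`

func buildSearchPath(preparedSchemas map[string]struct{}) string {
	var searchPath strings.Builder
	searchPath.WriteString(defaultSearchPath)
	for schema := range preparedSchemas {
		searchPath.WriteString(", " + schema)
	}
	return searchPath.String()
}

func main() {
	// a prepared database with two schemas; note that map iteration order
	// is random, so the schema order in the result is not stable
	schemas := map[string]struct{}{"data": {}, "history": {}}
	fmt.Println(buildSearchPath(schemas)) // e.g. "$user", data, history
}
```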
@@ -992,7 +1013,7 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
 	return nil
 }
 
-func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string) error {
+func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error {
 
 	for defaultRole, inherits := range defaultRoles {
 
@@ -1021,6 +1042,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
 			Password:   util.RandomPassword(constants.PasswordLength),
 			Flags:      flags,
 			MemberOf:   memberOf,
+			Parameters: map[string]string{"search_path": searchPath},
 			AdminRole:  adminRole,
 		}
 		if currentRole, present := c.pgUsers[roleName]; present {
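
Each default role now carries a `search_path` entry in its `Parameters` map. Assuming the user-sync layer translates `Parameters` into `ALTER ROLE ... SET` statements (this diff only shows the spec side, so that is an assumption), the effect would look roughly like the sketch below; role name, quoting, and path are illustrative:

```go
package main

import "fmt"

// buildAlterRoleSet renders the statements a Parameters map would
// plausibly translate into; quoting is simplified for illustration.
func buildAlterRoleSet(role string, params map[string]string) []string {
	stmts := make([]string, 0, len(params))
	for k, v := range params {
		stmts = append(stmts, fmt.Sprintf(`ALTER ROLE "%s" SET %s TO %s`, role, k, v))
	}
	return stmts
}

func main() {
	params := map[string]string{"search_path": `"$user", data`}
	for _, s := range buildAlterRoleSet("foo_owner", params) {
		fmt.Println(s)
	}
}
```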
@@ -7,6 +7,7 @@ import (
 	"path"
 	"sort"
 	"strconv"
+	"strings"
 
 	"github.com/sirupsen/logrus"
 
@@ -20,7 +21,6 @@ import (
 
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"
-	pkgspec "github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
@@ -557,6 +557,8 @@ func (c *Cluster) generatePodTemplate(
 	initContainers []v1.Container,
 	sidecarContainers []v1.Container,
 	tolerationsSpec *[]v1.Toleration,
+	spiloRunAsUser *int64,
+	spiloRunAsGroup *int64,
 	spiloFSGroup *int64,
 	nodeAffinity *v1.Affinity,
 	terminateGracePeriod int64,
 
@@ -576,6 +578,14 @@ func (c *Cluster) generatePodTemplate(
 	containers = append(containers, sidecarContainers...)
 	securityContext := v1.PodSecurityContext{}
 
+	if spiloRunAsUser != nil {
+		securityContext.RunAsUser = spiloRunAsUser
+	}
+
+	if spiloRunAsGroup != nil {
+		securityContext.RunAsGroup = spiloRunAsGroup
+	}
+
 	if spiloFSGroup != nil {
 		securityContext.FSGroup = spiloFSGroup
 	}
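
The optional IDs map straight onto the pod's security context: each field is set only when configured, so an unset value keeps whatever the container image or admission defaults provide. A minimal runnable sketch (the IDs are arbitrary examples):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func podSecurityContext(runAsUser, runAsGroup, fsGroup *int64) v1.PodSecurityContext {
	sc := v1.PodSecurityContext{}
	if runAsUser != nil {
		sc.RunAsUser = runAsUser
	}
	if runAsGroup != nil {
		sc.RunAsGroup = runAsGroup
	}
	if fsGroup != nil {
		sc.FSGroup = fsGroup
	}
	return sc
}

func main() {
	uid, gid := int64(101), int64(103)
	fmt.Printf("%+v\n", podSecurityContext(&uid, &gid, &gid))
}
```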
@@ -715,6 +725,30 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "SPILO_CONFIGURATION", Value: spiloConfiguration})
 	}
 
+	if c.patroniUsesKubernetes() {
+		envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
+	} else {
+		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
+	}
+
+	if c.patroniKubernetesUseConfigMaps() {
+		envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
+	}
+
+	if cloneDescription != nil && cloneDescription.ClusterName != "" {
+		envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
+	}
+
+	if c.Spec.StandbyCluster != nil {
+		envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
+	}
+
+	// add vars taken from pod_environment_configmap and pod_environment_secret first
+	// (to allow them to override the globals set in the operator config)
+	if len(customPodEnvVarsList) > 0 {
+		envVars = append(envVars, customPodEnvVarsList...)
+	}
+
 	if c.OpConfig.WALES3Bucket != "" {
 		envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
 		envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
 
@@ -737,28 +771,6 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
 	}
 
-	if c.patroniUsesKubernetes() {
-		envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
-	} else {
-		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
-	}
-
-	if c.patroniKubernetesUseConfigMaps() {
-		envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
-	}
-
-	if cloneDescription.ClusterName != "" {
-		envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
-	}
-
-	if c.Spec.StandbyCluster != nil {
-		envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
-	}
-
-	if len(customPodEnvVarsList) > 0 {
-		envVars = append(envVars, customPodEnvVarsList...)
-	}
-
 	return envVars
 }
@@ -777,13 +789,81 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.
 			result = append(result, input[i])
 		} else if names[va.Name] == 1 {
 			names[va.Name]++
+
+			// Some variables (those to configure the WAL_ and LOG_ shipping) may be overwritten, only log as info
+			if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") {
+				logger.Infof("global variable %q has been overwritten by configmap/secret for container %q",
+					va.Name, containerName)
+			} else {
 				logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
 					va.Name, containerName)
+			}
 		}
 	}
 	return result
 }
 
+// Return list of variables the pod received from the configured ConfigMap
+func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
+	configMapPodEnvVarsList := make([]v1.EnvVar, 0)
+
+	if c.OpConfig.PodEnvironmentConfigMap.Name == "" {
+		return configMapPodEnvVarsList, nil
+	}
+
+	cm, err := c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
+		context.TODO(),
+		c.OpConfig.PodEnvironmentConfigMap.Name,
+		metav1.GetOptions{})
+	if err != nil {
+		// if not found, try again using the cluster's namespace if it's different (old behavior)
+		if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
+			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
+				context.TODO(),
+				c.OpConfig.PodEnvironmentConfigMap.Name,
+				metav1.GetOptions{})
+		}
+		if err != nil {
+			return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
+		}
+	}
+	for k, v := range cm.Data {
+		configMapPodEnvVarsList = append(configMapPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
+	}
+	return configMapPodEnvVarsList, nil
+}
+
+// Return list of variables the pod received from the configured Secret
+func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) {
+	secretPodEnvVarsList := make([]v1.EnvVar, 0)
+
+	if c.OpConfig.PodEnvironmentSecret == "" {
+		return secretPodEnvVarsList, nil
+	}
+
+	secret, err := c.KubeClient.Secrets(c.OpConfig.PodEnvironmentSecret).Get(
+		context.TODO(),
+		c.OpConfig.PodEnvironmentSecret,
+		metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("could not read Secret PodEnvironmentSecretName: %v", err)
+	}
+
+	for k := range secret.Data {
+		secretPodEnvVarsList = append(secretPodEnvVarsList,
+			v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{
+				SecretKeyRef: &v1.SecretKeySelector{
+					LocalObjectReference: v1.LocalObjectReference{
+						Name: c.OpConfig.PodEnvironmentSecret,
+					},
+					Key: k,
+				},
+			}})
+	}
+
+	return secretPodEnvVarsList, nil
+}
+
 func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container {
 	name := sidecar.Name
 	if name == "" {
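
Deduplication keeps the first occurrence of each name, so variables appended earlier (the custom ones) shadow later (global) definitions. A self-contained sketch of that first-wins rule, with a plain struct standing in for `v1.EnvVar`:

```go
package main

import "fmt"

type envVar struct{ Name, Value string }

// dedupe keeps only the first occurrence of every variable name,
// mirroring how earlier (custom) definitions shadow later (global) ones.
func dedupe(input []envVar) []envVar {
	seen := map[string]bool{}
	result := make([]envVar, 0, len(input))
	for _, v := range input {
		if seen[v.Name] {
			continue // later duplicate: ignored (the operator logs it)
		}
		seen[v.Name] = true
		result = append(result, v)
	}
	return result
}

func main() {
	vars := []envVar{
		{"WAL_S3_BUCKET", "from-secret"}, // custom value, appended first
		{"WAL_S3_BUCKET", "from-config"}, // global value, appended later
	}
	fmt.Println(dedupe(vars)) // [{WAL_S3_BUCKET from-secret}]
}
```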
@@ -943,32 +1023,23 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		initContainers = spec.InitContainers
 	}
 
-	customPodEnvVarsList := make([]v1.EnvVar, 0)
+	// fetch env vars from custom ConfigMap
+	configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables()
+	if err != nil {
+		return nil, err
+	}
 
-	if c.OpConfig.PodEnvironmentConfigMap != (pkgspec.NamespacedName{}) {
-		var cm *v1.ConfigMap
-		cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
-			context.TODO(),
-			c.OpConfig.PodEnvironmentConfigMap.Name,
-			metav1.GetOptions{})
-		if err != nil {
-			// if not found, try again using the cluster's namespace if it's different (old behavior)
-			if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
-				cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
-					context.TODO(),
-					c.OpConfig.PodEnvironmentConfigMap.Name,
-					metav1.GetOptions{})
-			}
-			if err != nil {
-				return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
-			}
-		}
-		for k, v := range cm.Data {
-			customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
-		}
+	// fetch env vars from custom Secret
+	secretEnvVarsList, err := c.getPodEnvironmentSecretVariables()
+	if err != nil {
+		return nil, err
+	}
+
+	// concat all custom pod env vars and sort them
+	customPodEnvVarsList := append(configMapEnvVarsList, secretEnvVarsList...)
 	sort.Slice(customPodEnvVarsList,
 		func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
-	}
 
 	if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" {
 		return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
 	}
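
With both sources fetched, the custom variables are concatenated and sorted by name so the resulting pod spec stays deterministic across syncs. A runnable sketch of that step:

```go
package main

import (
	"fmt"
	"sort"
)

type envVar struct{ Name, Value string }

func main() {
	configMapVars := []envVar{{"foo2", "bar2"}, {"foo1", "bar1"}}
	secretVars := []envVar{{"minio_access_key", "alpha"}}

	// concat all custom pod env vars and sort them by name,
	// as generateStatefulSet does after fetching both sources
	custom := append(configMapVars, secretVars...)
	sort.Slice(custom, func(i, j int) bool { return custom[i].Name < custom[j].Name })
	fmt.Println(custom)
}
```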
@@ -1004,7 +1075,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	spiloEnvVars := c.generateSpiloPodEnvVars(
 		c.Postgresql.GetUID(),
 		spiloConfiguration,
-		&spec.Clone,
+		spec.Clone,
 		spec.StandbyCluster,
 		customPodEnvVarsList,
 	)
 
@@ -1012,7 +1083,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	// pickup the docker image for the spilo container
 	effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
 
-	// determine the FSGroup for the spilo pod
+	// determine the User, Group and FSGroup for the spilo pod
+	effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
+	if spec.SpiloRunAsUser != nil {
+		effectiveRunAsUser = spec.SpiloRunAsUser
+	}
+
+	effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
+	if spec.SpiloRunAsGroup != nil {
+		effectiveRunAsGroup = spec.SpiloRunAsGroup
+	}
+
 	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
 	if spec.SpiloFSGroup != nil {
 		effectiveFSGroup = spec.SpiloFSGroup
 
@@ -1156,6 +1237,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		initContainers,
 		sidecarContainers,
 		&tolerationSpec,
+		effectiveRunAsUser,
+		effectiveRunAsGroup,
 		effectiveFSGroup,
 		nodeAffinity(c.OpConfig.NodeReadinessLabel),
 		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
 
@@ -1558,6 +1641,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 		}
 
 		c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
+		serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
 		serviceSpec.Type = v1.ServiceTypeLoadBalancer
 	} else if role == Replica {
 		// before PR #258, the replica service was only created if allocated a LB
 
@@ -1835,6 +1919,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		[]v1.Container{},
 		&[]v1.Toleration{},
 		nil,
+		nil,
+		nil,
 		nodeAffinity(c.OpConfig.NodeReadinessLabel),
 		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
 		c.OpConfig.PodServiceAccountName,
@@ -1,15 +1,18 @@
 package cluster
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"reflect"
+	"sort"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 
@@ -22,6 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
 // For testing purposes
@@ -116,17 +120,17 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
 
 	expectedValuesGSBucket := []ExpectedValue{
 		ExpectedValue{
-			envIndex:       14,
+			envIndex:       15,
 			envVarConstant: "WAL_GS_BUCKET",
 			envVarValue:    "wale-gs-bucket",
 		},
 		ExpectedValue{
-			envIndex:       15,
+			envIndex:       16,
 			envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
 			envVarValue:    "/SomeUUID",
 		},
 		ExpectedValue{
-			envIndex:       16,
+			envIndex:       17,
 			envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
 			envVarValue:    "",
 		},
 
@@ -134,22 +138,22 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
 
 	expectedValuesGCPCreds := []ExpectedValue{
 		ExpectedValue{
-			envIndex:       14,
+			envIndex:       15,
 			envVarConstant: "WAL_GS_BUCKET",
 			envVarValue:    "wale-gs-bucket",
 		},
 		ExpectedValue{
-			envIndex:       15,
+			envIndex:       16,
 			envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
 			envVarValue:    "/SomeUUID",
 		},
 		ExpectedValue{
-			envIndex:       16,
+			envIndex:       17,
 			envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
 			envVarValue:    "",
 		},
 		ExpectedValue{
-			envIndex:       17,
+			envIndex:       18,
 			envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS",
 			envVarValue:    "some_path_to_credentials",
 		},
@@ -713,6 +717,218 @@ func TestSecretVolume(t *testing.T) {
 		}
 	}
 }
 
+const (
+	testPodEnvironmentConfigMapName = "pod_env_cm"
+	testPodEnvironmentSecretName    = "pod_env_sc"
+)
+
+type mockSecret struct {
+	v1core.SecretInterface
+}
+
+type mockConfigMap struct {
+	v1core.ConfigMapInterface
+}
+
+func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
+	if name != testPodEnvironmentSecretName {
+		return nil, fmt.Errorf("Secret PodEnvironmentSecret not found")
+	}
+	secret := &v1.Secret{}
+	secret.Name = testPodEnvironmentSecretName
+	secret.Data = map[string][]byte{
+		"minio_access_key": []byte("alpha"),
+		"minio_secret_key": []byte("beta"),
+	}
+	return secret, nil
+}
+
+func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) {
+	if name != testPodEnvironmentConfigMapName {
+		return nil, fmt.Errorf("NotFound")
+	}
+	configmap := &v1.ConfigMap{}
+	configmap.Name = testPodEnvironmentConfigMapName
+	configmap.Data = map[string]string{
+		"foo1": "bar1",
+		"foo2": "bar2",
+	}
+	return configmap, nil
+}
+
+type MockSecretGetter struct {
+}
+
+type MockConfigMapsGetter struct {
+}
+
+func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface {
+	return &mockSecret{}
+}
+
+func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface {
+	return &mockConfigMap{}
+}
+
+func newMockKubernetesClient() k8sutil.KubernetesClient {
+	return k8sutil.KubernetesClient{
+		SecretsGetter:    &MockSecretGetter{},
+		ConfigMapsGetter: &MockConfigMapsGetter{},
+	}
+}
+
+func newMockCluster(opConfig config.Config) *Cluster {
+	cluster := &Cluster{
+		Config:     Config{OpConfig: opConfig},
+		KubeClient: newMockKubernetesClient(),
+	}
+	return cluster
+}
+
+func TestPodEnvironmentConfigMapVariables(t *testing.T) {
+	testName := "TestPodEnvironmentConfigMapVariables"
+	tests := []struct {
+		subTest  string
+		opConfig config.Config
+		envVars  []v1.EnvVar
+		err      error
+	}{
+		{
+			subTest: "no PodEnvironmentConfigMap",
+			envVars: []v1.EnvVar{},
+		},
+		{
+			subTest: "missing PodEnvironmentConfigMap",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentConfigMap: spec.NamespacedName{
+						Name: "idonotexist",
+					},
+				},
+			},
+			err: fmt.Errorf("could not read PodEnvironmentConfigMap: NotFound"),
+		},
+		{
+			subTest: "simple PodEnvironmentConfigMap",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentConfigMap: spec.NamespacedName{
+						Name: testPodEnvironmentConfigMapName,
+					},
+				},
+			},
+			envVars: []v1.EnvVar{
+				{
+					Name:  "foo1",
+					Value: "bar1",
+				},
+				{
+					Name:  "foo2",
+					Value: "bar2",
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		c := newMockCluster(tt.opConfig)
+		vars, err := c.getPodEnvironmentConfigMapVariables()
+		sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
+		if !reflect.DeepEqual(vars, tt.envVars) {
+			t.Errorf("%s %s: expected `%v` but got `%v`",
+				testName, tt.subTest, tt.envVars, vars)
+		}
+		if tt.err != nil {
+			if err.Error() != tt.err.Error() {
+				t.Errorf("%s %s: expected error `%v` but got `%v`",
+					testName, tt.subTest, tt.err, err)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("%s %s: expected no error but got error: `%v`",
+					testName, tt.subTest, err)
+			}
+		}
+	}
+}
+
+// Test if the keys of an existing secret are properly referenced
+func TestPodEnvironmentSecretVariables(t *testing.T) {
+	testName := "TestPodEnvironmentSecretVariables"
+	tests := []struct {
+		subTest  string
+		opConfig config.Config
+		envVars  []v1.EnvVar
+		err      error
+	}{
+		{
+			subTest: "No PodEnvironmentSecret configured",
+			envVars: []v1.EnvVar{},
+		},
+		{
+			subTest: "Secret referenced by PodEnvironmentSecret does not exist",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentSecret: "idonotexist",
+				},
+			},
+			err: fmt.Errorf("could not read Secret PodEnvironmentSecretName: Secret PodEnvironmentSecret not found"),
+		},
+		{
+			subTest: "Pod environment vars reference all keys from secret configured by PodEnvironmentSecret",
+			opConfig: config.Config{
+				Resources: config.Resources{
+					PodEnvironmentSecret: testPodEnvironmentSecretName,
+				},
+			},
+			envVars: []v1.EnvVar{
+				{
+					Name: "minio_access_key",
+					ValueFrom: &v1.EnvVarSource{
+						SecretKeyRef: &v1.SecretKeySelector{
+							LocalObjectReference: v1.LocalObjectReference{
+								Name: testPodEnvironmentSecretName,
+							},
+							Key: "minio_access_key",
+						},
+					},
+				},
+				{
+					Name: "minio_secret_key",
+					ValueFrom: &v1.EnvVarSource{
+						SecretKeyRef: &v1.SecretKeySelector{
+							LocalObjectReference: v1.LocalObjectReference{
+								Name: testPodEnvironmentSecretName,
+							},
+							Key: "minio_secret_key",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		c := newMockCluster(tt.opConfig)
+		vars, err := c.getPodEnvironmentSecretVariables()
+		sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
+		if !reflect.DeepEqual(vars, tt.envVars) {
+			t.Errorf("%s %s: expected `%v` but got `%v`",
+				testName, tt.subTest, tt.envVars, vars)
+		}
+		if tt.err != nil {
+			if err.Error() != tt.err.Error() {
+				t.Errorf("%s %s: expected error `%v` but got `%v`",
+					testName, tt.subTest, tt.err, err)
+			}
+		} else {
+			if err != nil {
+				t.Errorf("%s %s: expected no error but got error: `%v`",
+					testName, tt.subTest, err)
+			}
+		}
+	}
+
+}
+
 func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
 	cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
 	if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
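
The mocks above rely on interface embedding: embedding `v1core.SecretInterface` makes `mockSecret` satisfy the whole interface while only `Get` is overridden; any other method would panic on the nil embedded value if a test called it by accident. A minimal self-contained sketch of the pattern:

```go
package main

import "fmt"

// Store is a wide interface of which a test only needs one method.
type Store interface {
	Get(name string) (string, error)
	Delete(name string) error
}

// mockStore embeds the interface: Get is overridden below, while an
// unimplemented Delete would panic if accidentally invoked in a test.
type mockStore struct {
	Store
}

func (m *mockStore) Get(name string) (string, error) {
	if name != "known" {
		return "", fmt.Errorf("NotFound")
	}
	return "value", nil
}

func main() {
	var s Store = &mockStore{}
	v, err := s.Get("known")
	fmt.Println(v, err)
}
```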
@@ -1086,6 +1302,8 @@ func TestTLS(t *testing.T) {
 	var err error
 	var spec acidv1.PostgresSpec
 	var cluster *Cluster
+	var spiloRunAsUser = int64(101)
+	var spiloRunAsGroup = int64(103)
 	var spiloFSGroup = int64(103)
 	var additionalVolumes = spec.AdditionalVolumes
 
@@ -1113,6 +1331,8 @@ func TestTLS(t *testing.T) {
 				ReplicationUsername: replicationUserName,
 			},
 			Resources: config.Resources{
+				SpiloRunAsUser:  &spiloRunAsUser,
+				SpiloRunAsGroup: &spiloRunAsGroup,
 				SpiloFSGroup: &spiloFSGroup,
 			},
 		},
@@ -1526,3 +1746,83 @@ func TestSidecars(t *testing.T) {
 	})
 
 }
+
+func TestGenerateService(t *testing.T) {
+	var spec acidv1.PostgresSpec
+	var cluster *Cluster
+	var enableLB bool = true
+	spec = acidv1.PostgresSpec{
+		TeamID: "myapp", NumberOfInstances: 1,
+		Resources: acidv1.Resources{
+			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+		},
+		Volume: acidv1.Volume{
+			Size: "1G",
+		},
+		Sidecars: []acidv1.Sidecar{
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar",
+			},
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar-with-resources",
+				Resources: acidv1.Resources{
+					ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
+					ResourceLimits:   acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
+				},
+			},
+			acidv1.Sidecar{
+				Name:        "replace-sidecar",
+				DockerImage: "overwrite-image",
+			},
+		},
+		EnableMasterLoadBalancer: &enableLB,
+	}
+
+	cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				ProtectedRoles:      []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				Resources: config.Resources{
+					DefaultCPURequest:    "200m",
+					DefaultCPULimit:      "500m",
+					DefaultMemoryRequest: "0.7Gi",
+					DefaultMemoryLimit:   "1.3Gi",
+				},
+				SidecarImages: map[string]string{
+					"deprecated-global-sidecar": "image:123",
+				},
+				SidecarContainers: []v1.Container{
+					v1.Container{
+						Name: "global-sidecar",
+					},
+					// will be replaced by a cluster specific sidecar with the same name
+					v1.Container{
+						Name:  "replace-sidecar",
+						Image: "replaced-image",
+					},
+				},
+				Scalyr: config.Scalyr{
+					ScalyrAPIKey:        "abc",
+					ScalyrImage:         "scalyr-image",
+					ScalyrCPURequest:    "220m",
+					ScalyrCPULimit:      "520m",
+					ScalyrMemoryRequest: "0.9Gi",
+					// use default memory limit
+				},
+				ExternalTrafficPolicy: "Cluster",
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+
+	service := cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
+	cluster.OpConfig.ExternalTrafficPolicy = "Local"
+	service = cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
+
+}
@@ -207,8 +207,6 @@ func (c *Cluster) deleteConnectionPooler() (err error) {
 		serviceName = service.Name
 	}
 
-	// set delete propagation policy to foreground, so that all the dependant
-	// will be deleted.
 	err = c.KubeClient.
 		Services(c.Namespace).
 		Delete(context.TODO(), serviceName, options)
@@ -221,6 +219,21 @@ func (c *Cluster) deleteConnectionPooler() (err error) {
 
 	c.logger.Infof("Connection pooler service %q has been deleted", serviceName)
 
+	// Repeat the same for the secret object
+	secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
+
+	secret, err := c.KubeClient.
+		Secrets(c.Namespace).
+		Get(context.TODO(), secretName, metav1.GetOptions{})
+
+	if err != nil {
+		c.logger.Debugf("could not get connection pooler secret %q: %v", secretName, err)
+	} else {
+		if err = c.deleteSecret(secret.UID, *secret); err != nil {
+			return fmt.Errorf("could not delete pooler secret: %v", err)
+		}
+	}
+
 	c.ConnectionPooler = nil
 	return nil
 }
@@ -725,17 +738,37 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 	return nil
 }
 
-func (c *Cluster) deleteSecret(secret *v1.Secret) error {
-	c.setProcessName("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
-	c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
+func (c *Cluster) deleteSecrets() error {
+	c.setProcessName("deleting secrets")
+	var errors []string
+	errorCount := 0
+	for uid, secret := range c.Secrets {
+		err := c.deleteSecret(uid, *secret)
+		if err != nil {
+			errors = append(errors, fmt.Sprintf("%v", err))
+			errorCount++
+		}
+	}
+
+	if errorCount > 0 {
+		return fmt.Errorf("could not delete all secrets: %v", errors)
+	}
+
+	return nil
+}
+
+func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error {
+	c.setProcessName("deleting secret")
+	secretName := util.NameFromMeta(secret.ObjectMeta)
+	c.logger.Debugf("deleting secret %q", secretName)
 	err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
 	if err != nil {
-		return err
+		return fmt.Errorf("could not delete secret %q: %v", secretName, err)
 	}
-	c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta))
-	delete(c.Secrets, secret.UID)
+	c.logger.Infof("secret %q has been deleted", secretName)
+	c.Secrets[uid] = nil
 
-	return err
+	return nil
 }
 
 func (c *Cluster) createRoles() (err error) {
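
`deleteSecrets` keeps going after a failed deletion and only reports the collected failures at the end, so one stuck secret no longer aborts the whole cleanup. A self-contained sketch of that collect-then-fail pattern:

```go
package main

import "fmt"

func deleteAll(names []string, del func(string) error) error {
	var errs []string
	for _, n := range names {
		if err := del(n); err != nil {
			// remember the failure but keep deleting the rest
			errs = append(errs, fmt.Sprintf("%v", err))
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("could not delete all secrets: %v", errs)
	}
	return nil
}

func main() {
	del := func(n string) error {
		if n == "locked" {
			return fmt.Errorf("could not delete secret %q", n)
		}
		return nil
	}
	fmt.Println(deleteAll([]string{"a", "locked", "b"}, del))
}
```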
@@ -57,6 +57,13 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}
 
+	if c.OpConfig.StorageResizeMode == "pvc" {
+		c.logger.Debugf("syncing persistent volume claims")
+		if err = c.syncVolumeClaims(); err != nil {
+			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
+			return err
+		}
+	} else if c.OpConfig.StorageResizeMode == "ebs" {
 	// potentially enlarge volumes before changing the statefulset. By doing that
 	// in this order we make sure the operator is not stuck waiting for a pod that
 	// cannot start because it ran out of disk space.
 
@@ -68,6 +75,9 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 			err = fmt.Errorf("could not sync persistent volumes: %v", err)
 			return err
 		}
+	} else {
+		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
+	}
 
 	if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
 		err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
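
The resize mode now has three branches: resize through the PVC API, resize the EBS volumes directly, or skip volume sync entirely. A compact sketch of that dispatch, with stub functions standing in for the operator's real `syncVolumeClaims`/`syncVolumes`:

```go
package main

import "fmt"

func syncStorage(mode string) {
	switch mode {
	case "pvc":
		fmt.Println("syncing persistent volume claims")
		// c.syncVolumeClaims() in the operator
	case "ebs":
		fmt.Println("syncing persistent volumes")
		// c.syncVolumes() in the operator
	default:
		fmt.Println("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
	}
}

func main() {
	for _, mode := range []string{"pvc", "ebs", "off"} {
		syncStorage(mode)
	}
}
```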
@@ -490,6 +500,7 @@ func (c *Cluster) syncSecrets() error {
 			c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername)
 			continue
 		}
+		c.Secrets[secret.UID] = secret
 		c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta))
 		if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
 			secretUsername = constants.SuperuserKeyName
@@ -571,6 +582,27 @@ func (c *Cluster) syncRoles() (err error) {
 	return nil
 }
 
+// syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset.
+func (c *Cluster) syncVolumeClaims() error {
+	c.setProcessName("syncing volume claims")
+
+	act, err := c.volumeClaimsNeedResizing(c.Spec.Volume)
+	if err != nil {
+		return fmt.Errorf("could not compare size of the volume claims: %v", err)
+	}
+	if !act {
+		c.logger.Infof("volume claims don't require changes")
+		return nil
+	}
+	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil {
+		return fmt.Errorf("could not sync volume claims: %v", err)
+	}
+
+	c.logger.Infof("volume claims have been synced successfully")
+
+	return nil
+}
+
 // syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset.
 func (c *Cluster) syncVolumes() error {
 	c.setProcessName("syncing volumes")
@@ -664,12 +696,8 @@ func (c *Cluster) syncPreparedDatabases() error {
 		if err := c.initDbConnWithName(preparedDbName); err != nil {
 			return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err)
 		}
-		defer func() {
-			if err := c.closeDbConn(); err != nil {
-				c.logger.Errorf("could not close database connection: %v", err)
-			}
-		}()
 
+		c.logger.Debugf("syncing prepared database %q", preparedDbName)
 		// now, prepare defined schemas
 		preparedSchemas := preparedDB.PreparedSchemas
 		if len(preparedDB.PreparedSchemas) == 0 {
 
@@ -683,6 +711,10 @@ func (c *Cluster) syncPreparedDatabases() error {
 		if err := c.syncExtensions(preparedDB.Extensions); err != nil {
 			return err
 		}
+
+		if err := c.closeDbConn(); err != nil {
+			c.logger.Errorf("could not close database connection: %v", err)
+		}
 	}
 
 	return nil
@@ -63,7 +63,8 @@ func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
 
 func TestConnectionPoolerSynchronization(t *testing.T) {
 	testName := "Test connection pooler synchronization"
-	var cluster = New(
+	newCluster := func() *Cluster {
+		return New(
 		Config{
 			OpConfig: config.Config{
 				ProtectedRoles: []string{"admin"},
 
@@ -80,6 +81,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 				},
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	}
+	cluster := newCluster()
 
 	cluster.Statefulset = &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 
@@ -87,20 +90,20 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 		},
 	}
 
-	clusterMissingObjects := *cluster
+	clusterMissingObjects := newCluster()
 	clusterMissingObjects.KubeClient = k8sutil.ClientMissingObjects()
 
-	clusterMock := *cluster
+	clusterMock := newCluster()
 	clusterMock.KubeClient = k8sutil.NewMockKubernetesClient()
 
-	clusterDirtyMock := *cluster
+	clusterDirtyMock := newCluster()
 	clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient()
 	clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{
 		Deployment: &appsv1.Deployment{},
 		Service:    &v1.Service{},
 	}
 
-	clusterNewDefaultsMock := *cluster
+	clusterNewDefaultsMock := newCluster()
 	clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient()
 
 	tests := []struct {
 
@@ -124,7 +127,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,
 
@@ -139,7 +142,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					EnableConnectionPooler: boolToPointer(true),
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,
 
@@ -154,7 +157,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMissingObjects,
+			cluster:          clusterMissingObjects,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreSaved,
 
@@ -169,7 +172,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreDeleted,
 
@@ -182,7 +185,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{},
 			},
-			cluster:          &clusterDirtyMock,
+			cluster:          clusterDirtyMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            objectsAreDeleted,
 
@@ -203,7 +206,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					},
 				},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            deploymentUpdated,
 
@@ -220,7 +223,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterNewDefaultsMock,
+			cluster:          clusterNewDefaultsMock,
 			defaultImage:     "pooler:2.0",
 			defaultInstances: 2,
 			check:            deploymentUpdated,
 
@@ -239,7 +242,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
-			cluster:          &clusterMock,
+			cluster:          clusterMock,
 			defaultImage:     "pooler:1.0",
 			defaultInstances: 1,
 			check:            noEmptySync,
@@ -52,6 +52,35 @@ func (c *Cluster) deletePersistentVolumeClaims() error {
 	return nil
 }
 
+func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error {
+	c.logger.Debugln("resizing PVCs")
+	pvcs, err := c.listPersistentVolumeClaims()
+	if err != nil {
+		return err
+	}
+	newQuantity, err := resource.ParseQuantity(newVolume.Size)
+	if err != nil {
+		return fmt.Errorf("could not parse volume size: %v", err)
+	}
+	_, newSize, err := c.listVolumesWithManifestSize(newVolume)
+	for _, pvc := range pvcs {
+		volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
+		if volumeSize >= newSize {
+			if volumeSize > newSize {
+				c.logger.Warningf("cannot shrink persistent volume")
+			}
+			continue
+		}
+		pvc.Spec.Resources.Requests[v1.ResourceStorage] = newQuantity
+		c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name)
+		if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil {
+			return fmt.Errorf("could not update persistent volume claim: %q", err)
+		}
+		c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name)
+	}
+	return nil
+}
+
 func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
 	result := make([]*v1.PersistentVolume, 0)
@@ -150,7 +179,7 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu
 			c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
 		}
 		if !compatible {
-			c.logger.Warningf("volume %q is incompatible with all available resizing providers", pv.Name)
+			c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name)
 			totalIncompatible++
 		}
 	}
@@ -160,6 +189,25 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu
 	return nil
 }
 
+func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error) {
+	newSize, err := resource.ParseQuantity(newVolume.Size)
+	manifestSize := quantityToGigabyte(newSize)
+	if err != nil {
+		return false, fmt.Errorf("could not parse volume size from the manifest: %v", err)
+	}
+	pvcs, err := c.listPersistentVolumeClaims()
+	if err != nil {
+		return false, fmt.Errorf("could not receive persistent volume claims: %v", err)
+	}
+	for _, pvc := range pvcs {
+		currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
+		if currentSize != manifestSize {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
 func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) {
 	vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume)
 	if err != nil {
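
Sizes are compared in whole gigabytes after parsing the Kubernetes quantity notation. A runnable sketch of that comparison; `quantityToGigabyte` here is an assumed stand-in for the operator's helper of the same name (the helper itself is outside this diff), dividing `Quantity.Value()` by 2^30:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// quantityToGigabyte stands in for the operator's helper, assumed to
// convert a quantity into whole gibibytes.
func quantityToGigabyte(q resource.Quantity) int64 {
	return q.Value() / (1 << 30)
}

func main() {
	current := resource.MustParse("1Gi")
	desired := resource.MustParse("2Gi")
	// true means a resize is needed
	fmt.Println(quantityToGigabyte(current) != quantityToGigabyte(desired))
}
```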
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"sync"
+	"time"
 
 	"github.com/sirupsen/logrus"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -70,6 +71,9 @@ type Controller struct {
 // NewController creates a new controller
 func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller {
 	logger := logrus.New()
+	if controllerConfig.EnableJsonLogging {
+		logger.SetFormatter(&logrus.JSONFormatter{})
+	}
 
 	var myComponentName = "postgres-operator"
 	if controllerId != "" {
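
Switching logrus to its JSON formatter is a one-liner; each entry then comes out as a single JSON object, which log aggregators can parse without regexes. A minimal runnable sketch:

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	logger := logrus.New()
	logger.SetFormatter(&logrus.JSONFormatter{})

	// emits e.g. {"component":"postgres-operator","level":"info","msg":"config loaded","time":"..."}
	logger.WithField("component", "postgres-operator").Info("config loaded")
}
```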
@@ -300,7 +304,8 @@ func (c *Controller) initController() {
 
 	c.logger.Infof("config: %s", c.opConfig.MustMarshal())
 
-	if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil {
+	roleDefs := c.getInfrastructureRoleDefinitions()
+	if infraRoles, err := c.getInfrastructureRoles(roleDefs); err != nil {
 		c.logger.Warningf("could not get infrastructure roles: %v", err)
 	} else {
 		c.config.InfrastructureRoles = infraRoles
@@ -453,6 +458,37 @@ func (c *Controller) GetReference(postgresql *acidv1.Postgresql) *v1.ObjectRefer
 	return ref
 }
 
+func (c *Controller) meetsClusterDeleteAnnotations(postgresql *acidv1.Postgresql) error {
+
+	deleteAnnotationDateKey := c.opConfig.DeleteAnnotationDateKey
+	currentTime := time.Now()
+	currentDate := currentTime.Format("2006-01-02") // go's reference date
+
+	if deleteAnnotationDateKey != "" {
+		if deleteDate, ok := postgresql.Annotations[deleteAnnotationDateKey]; ok {
+			if deleteDate != currentDate {
+				return fmt.Errorf("annotation %s not matching the current date: got %s, expected %s", deleteAnnotationDateKey, deleteDate, currentDate)
+			}
+		} else {
+			return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationDateKey)
+		}
+	}
+
+	deleteAnnotationNameKey := c.opConfig.DeleteAnnotationNameKey
+
+	if deleteAnnotationNameKey != "" {
+		if clusterName, ok := postgresql.Annotations[deleteAnnotationNameKey]; ok {
+			if clusterName != postgresql.Name {
+				return fmt.Errorf("annotation %s not matching the cluster name: got %s, expected %s", deleteAnnotationNameKey, clusterName, postgresql.Name)
+			}
+		} else {
+			return fmt.Errorf("annotation %s not set in manifest to allow cluster deletion", deleteAnnotationNameKey)
+		}
+	}
+
+	return nil
+}
+
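
The guard only permits deletion when the manifest carries matching annotations, dated to the same day. A self-contained sketch of the two checks with hypothetical annotation keys (the real keys come from the operator configuration, not from this sketch):

```go
package main

import (
	"fmt"
	"time"
)

func meetsDeleteAnnotations(annotations map[string]string, clusterName string) error {
	// "2006-01-02" is Go's reference date layout, yielding YYYY-MM-DD
	today := time.Now().Format("2006-01-02")
	if annotations["delete-date"] != today {
		return fmt.Errorf("annotation delete-date not matching the current date")
	}
	if annotations["delete-clustername"] != clusterName {
		return fmt.Errorf("annotation delete-clustername not matching the cluster name")
	}
	return nil
}

func main() {
	ann := map[string]string{
		"delete-date":        time.Now().Format("2006-01-02"),
		"delete-clustername": "acid-test-cluster",
	}
	fmt.Println(meetsDeleteAnnotations(ann, "acid-test-cluster")) // <nil>: deletion allowed
}
```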
// hasOwnership returns true if the controller is the "owner" of the postgresql.
|
// hasOwnership returns true if the controller is the "owner" of the postgresql.
|
||||||
// Whether it's owner is determined by the value of 'acid.zalan.do/controller'
|
// Whether it's owner is determined by the value of 'acid.zalan.do/controller'
|
||||||
// annotation. If the value matches the controllerID then it owns it, or if the
|
// annotation. If the value matches the controllerID then it owns it, or if the
|
||||||
|
|
|
||||||
|
|
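To make the gate above concrete: with the operator configured to use the keys "delete-date" and "delete-clustername" (the same values the tests further below use), a manifest passes the check only when both annotations are present and current. A minimal sketch, not part of the commit:

    pg := &acidv1.Postgresql{
    	ObjectMeta: metav1.ObjectMeta{
    		Name: "acid-test-cluster",
    		Annotations: map[string]string{
    			"delete-date":        time.Now().Format("2006-01-02"), // must match today's date
    			"delete-clustername": "acid-test-cluster",             // must match the resource name
    		},
    	},
    }
    // c.meetsClusterDeleteAnnotations(pg) returns nil; dropping either
    // annotation, or letting the date go stale, yields a descriptive error.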
@@ -58,23 +58,44 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfiguration) {
 	result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
 	result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
 	result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap
+	result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
 	result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
 	result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
+	result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
+	result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
 	result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
 	result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
 	result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
 	result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
 	result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True())
+	result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "ebs")
 	result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
 	result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
 	result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
 	result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
 
 	result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
+	if fromCRD.Kubernetes.InfrastructureRolesDefs != nil {
+		result.InfrastructureRoles = []*config.InfrastructureRole{}
+		for _, secret := range fromCRD.Kubernetes.InfrastructureRolesDefs {
+			result.InfrastructureRoles = append(
+				result.InfrastructureRoles,
+				&config.InfrastructureRole{
+					SecretName:  secret.SecretName,
+					UserKey:     secret.UserKey,
+					RoleKey:     secret.RoleKey,
+					PasswordKey: secret.PasswordKey,
+				})
+		}
+	}
+
 	result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role")
 	result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"})
 	result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels
 	result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations
 	result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name")
+	result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey
+	result.DeleteAnnotationNameKey = fromCRD.Kubernetes.DeleteAnnotationNameKey
 	result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready")

@@ -105,6 +126,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfiguration) {
 	result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
 	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
 	result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat
+	result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster")
 
 	// AWS or GCP config
 	result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"reflect"
 	"strings"

@@ -420,6 +421,22 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.Postgresql, eventType EventType) {
 		clusterError = informerNewSpec.Error
 	}
 
+	// only allow deletion if delete annotations are set and conditions are met
+	if eventType == EventDelete {
+		if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil {
+			c.logger.WithField("cluster-name", clusterName).Warnf(
+				"ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err)
+			c.logger.WithField("cluster-name", clusterName).Warnf(
+				"please, recreate Postgresql resource %q and set annotations to delete properly", clusterName)
+			if currentManifest, marshalErr := json.Marshal(informerOldSpec); marshalErr != nil {
+				c.logger.WithField("cluster-name", clusterName).Warnf("could not marshal current manifest:\n%+v", informerOldSpec)
+			} else {
+				c.logger.WithField("cluster-name", clusterName).Warnf("%s\n", string(currentManifest))
+			}
+			return
+		}
+	}
+
 	if clusterError != "" && eventType != EventDelete {
 		c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError)
@@ -1,8 +1,10 @@
 package controller
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
+	"time"
 
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"

@@ -90,3 +92,88 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
 		}
 	}
 }
+
+func TestMeetsClusterDeleteAnnotations(t *testing.T) {
+	// set delete annotations in the configuration
+	postgresqlTestController.opConfig.DeleteAnnotationDateKey = "delete-date"
+	postgresqlTestController.opConfig.DeleteAnnotationNameKey = "delete-clustername"
+
+	currentTime := time.Now()
+	today := currentTime.Format("2006-01-02") // Go's reference date layout
+	clusterName := "acid-test-cluster"
+
+	tests := []struct {
+		name  string
+		pg    *acidv1.Postgresql
+		error string
+	}{
+		{
+			"Postgres cluster with matching delete annotations",
+			&acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: clusterName,
+					Annotations: map[string]string{
+						"delete-date":        today,
+						"delete-clustername": clusterName,
+					},
+				},
+			},
+			"",
+		},
+		{
+			"Postgres cluster with violated delete date annotation",
+			&acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: clusterName,
+					Annotations: map[string]string{
+						"delete-date":        "2020-02-02",
+						"delete-clustername": clusterName,
+					},
+				},
+			},
+			fmt.Sprintf("annotation delete-date not matching the current date: got 2020-02-02, expected %s", today),
+		},
+		{
+			"Postgres cluster with violated delete cluster name annotation",
+			&acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: clusterName,
+					Annotations: map[string]string{
+						"delete-date":        today,
+						"delete-clustername": "acid-minimal-cluster",
+					},
+				},
+			},
+			fmt.Sprintf("annotation delete-clustername not matching the cluster name: got acid-minimal-cluster, expected %s", clusterName),
+		},
+		{
+			"Postgres cluster with missing delete annotations",
+			&acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        clusterName,
+					Annotations: map[string]string{},
+				},
+			},
+			"annotation delete-date not set in manifest to allow cluster deletion",
+		},
+		{
+			"Postgres cluster with missing delete cluster name annotation",
+			&acidv1.Postgresql{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: clusterName,
+					Annotations: map[string]string{
+						"delete-date": today,
+					},
+				},
+			},
+			"annotation delete-clustername not set in manifest to allow cluster deletion",
+		},
+	}
+	for _, tt := range tests {
+		if err := postgresqlTestController.meetsClusterDeleteAnnotations(tt.pg); err != nil {
+			if !reflect.DeepEqual(err.Error(), tt.error) {
+				t.Errorf("Expected error %q, got: %v", tt.error, err)
+			}
+		}
+	}
+}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"

@@ -14,6 +15,7 @@ import (
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/cluster"
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"gopkg.in/yaml.v2"

@@ -109,8 +111,163 @@ func readDecodedRole(s string) (*spec.PgUser, error) {
 	return &result, nil
 }
 
-func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (map[string]spec.PgUser, error) {
-	if *rolesSecret == (spec.NamespacedName{}) {
+var emptyName = (spec.NamespacedName{})
+
+// Return information about which secrets we need to use to create
+// infrastructure roles and in which format they are. This is done in a
+// compatible way, so that the previous logic is not changed, and handles
+// configuration both from the ConfigMap and from the CRD.
+func (c *Controller) getInfrastructureRoleDefinitions() []*config.InfrastructureRole {
+	var roleDef config.InfrastructureRole
+
+	// take from the CRD configuration
+	rolesDefs := c.opConfig.InfrastructureRoles
+
+	// check if we can extract something from the configmap config option
+	if c.opConfig.InfrastructureRolesDefs != "" {
+		// The configmap option could contain a role description (in the
+		// form key1: value1, key2: value2), which has to be used together with
+		// an old secret name.
+
+		var secretName spec.NamespacedName
+		var err error
+		propertySep := ","
+		valueSep := ":"
+
+		// The field contains the format in which the secret is written; let's
+		// convert it to a proper definition
+		properties := strings.Split(c.opConfig.InfrastructureRolesDefs, propertySep)
+		roleDef = config.InfrastructureRole{Template: false}
+
+		for _, property := range properties {
+			values := strings.Split(property, valueSep)
+			if len(values) < 2 {
+				continue
+			}
+			name := strings.TrimSpace(values[0])
+			value := strings.TrimSpace(values[1])
+
+			switch name {
+			case "secretname":
+				if err = secretName.DecodeWorker(value, "default"); err != nil {
+					c.logger.Warningf("Could not marshal secret name %s: %v", value, err)
+				} else {
+					roleDef.SecretName = secretName
+				}
+			case "userkey":
+				roleDef.UserKey = value
+			case "passwordkey":
+				roleDef.PasswordKey = value
+			case "rolekey":
+				roleDef.RoleKey = value
+			case "defaultuservalue":
+				roleDef.DefaultUserValue = value
+			case "defaultrolevalue":
+				roleDef.DefaultRoleValue = value
+			default:
+				c.logger.Warningf("Role description is not known: %s", properties)
+			}
+		}
+
+		if roleDef.SecretName != emptyName &&
+			(roleDef.UserKey != "" || roleDef.DefaultUserValue != "") &&
+			roleDef.PasswordKey != "" {
+			rolesDefs = append(rolesDefs, &roleDef)
+		}
+	}
+
+	if c.opConfig.InfrastructureRolesSecretName != emptyName {
+		// At this point we deal with the old format; let's replicate it
+		// via the existing definition structure and remember that it's just a
+		// template, the real values are in user1,password1,inrole1 etc.
+		rolesDefs = append(rolesDefs, &config.InfrastructureRole{
+			SecretName:  c.opConfig.InfrastructureRolesSecretName,
+			UserKey:     "user",
+			PasswordKey: "password",
+			RoleKey:     "inrole",
+			Template:    true,
+		})
+	}
+
+	return rolesDefs
+}
+
+func (c *Controller) getInfrastructureRoles(
+	rolesSecrets []*config.InfrastructureRole) (
+	map[string]spec.PgUser, []error) {
+
+	var errors []error
+	var noRolesProvided = true
+
+	roles := []spec.PgUser{}
+	uniqRoles := map[string]spec.PgUser{}
+
+	// To be compatible with the legacy implementation we need to return nil if
+	// the provided secret name is empty. The equivalent situation in the
+	// current implementation is an empty rolesSecrets slice or one where all
+	// items are empty.
+	for _, role := range rolesSecrets {
+		if role.SecretName != emptyName {
+			noRolesProvided = false
+		}
+	}
+
+	if noRolesProvided {
+		return nil, nil
+	}
+
+	for _, secret := range rolesSecrets {
+		infraRoles, err := c.getInfrastructureRole(secret)
+
+		if err != nil || infraRoles == nil {
+			c.logger.Debugf("Cannot get infrastructure role: %+v", *secret)
+
+			if err != nil {
+				errors = append(errors, err)
+			}
+
+			continue
+		}
+
+		for _, r := range infraRoles {
+			roles = append(roles, r)
+		}
+	}
+
+	for _, r := range roles {
+		if _, exists := uniqRoles[r.Name]; exists {
+			msg := "Conflicting infrastructure roles: roles[%s] = (%q, %q)"
+			c.logger.Debugf(msg, r.Name, uniqRoles[r.Name], r)
+		}
+
+		uniqRoles[r.Name] = r
+	}
+
+	return uniqRoles, errors
+}
+
+// Generate a list of users representing one infrastructure role based on its
+// description in various K8s objects. An infrastructure role could be
+// described by a secret and optionally a configmap. The former should contain
+// the secret information, i.e. username, password, role. The latter could
+// contain an extensive description of the role and even override
+// information obtained from the secret (except the password).
+//
+// This function returns a list of users to be compatible with the previous
+// behaviour, since we don't know how many users are actually encoded in the
+// secret if it's a "template" role. If the provided role is not a template
+// one, the result would be a list with just one user in it.
+//
+// FIXME: This dependency on two different objects is rather unnecessarily
+// complicated, so let's get rid of it via a deprecation process.
+func (c *Controller) getInfrastructureRole(
+	infraRole *config.InfrastructureRole) (
+	[]spec.PgUser, error) {
+
+	rolesSecret := infraRole.SecretName
+	roles := []spec.PgUser{}
+
+	if rolesSecret == emptyName {
 		// we don't have infrastructure roles defined, bail out
 		return nil, nil
 	}

@@ -119,22 +276,27 @@ func (c *Controller) getInfrastructureRole(infraRole *config.InfrastructureRole) ([]spec.PgUser, error) {
 		Secrets(rolesSecret.Namespace).
 		Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{})
 	if err != nil {
-		c.logger.Debugf("infrastructure roles secret name: %q", *rolesSecret)
-		return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err)
+		msg := "could not get infrastructure roles secret %s/%s: %v"
+		return nil, fmt.Errorf(msg, rolesSecret.Namespace, rolesSecret.Name, err)
 	}
 
 	secretData := infraRolesSecret.Data
-	result := make(map[string]spec.PgUser)
+
+	if infraRole.Template {
 Users:
-	// in the worst case we would have one line per user
 	for i := 1; i <= len(secretData); i++ {
-		properties := []string{"user", "password", "inrole"}
+		properties := []string{
+			infraRole.UserKey,
+			infraRole.PasswordKey,
+			infraRole.RoleKey,
+		}
 		t := spec.PgUser{Origin: spec.RoleOriginInfrastructure}
 		for _, p := range properties {
 			key := fmt.Sprintf("%s%d", p, i)
 			if val, present := secretData[key]; !present {
 				if p == "user" {
-					// exit when the user name with the next sequence id is absent
+					// exit when the user name with the next sequence id is
+					// absent
 					break Users
 				}
 			} else {

@@ -150,21 +312,63 @@ Users:
 				c.logger.Warningf("unknown key %q", p)
 			}
 		}
+			// XXX: This is a part of the original implementation, which is
+			// rather obscure. Why do we delete this key? Wouldn't it be
+			// used later in the comparison for the configmap?
 			delete(secretData, key)
 		}
 
-		if t.Name != "" {
-			if t.Password == "" {
-				c.logger.Warningf("infrastructure role %q has no password defined and is ignored", t.Name)
-				continue
-			}
-			result[t.Name] = t
+			if t.Valid() {
+				roles = append(roles, t)
+			} else {
+				msg := "infrastructure role %q is not complete and ignored"
+				c.logger.Warningf(msg, t)
+			}
 		}
+	} else {
+		roleDescr := &spec.PgUser{Origin: spec.RoleOriginInfrastructure}
+
+		if details, exists := secretData[infraRole.Details]; exists {
+			if err := yaml.Unmarshal(details, &roleDescr); err != nil {
+				return nil, fmt.Errorf("could not decode yaml role: %v", err)
+			}
+		} else {
+			roleDescr.Name = util.Coalesce(string(secretData[infraRole.UserKey]), infraRole.DefaultUserValue)
+			roleDescr.Password = string(secretData[infraRole.PasswordKey])
+			roleDescr.MemberOf = append(roleDescr.MemberOf,
+				util.Coalesce(string(secretData[infraRole.RoleKey]), infraRole.DefaultRoleValue))
+		}
 
-		// perhaps we have some map entries with usernames, passwords, let's check if we have those users in the configmap
-		if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(
-			context.TODO(), rolesSecret.Name, metav1.GetOptions{}); err == nil {
+		if roleDescr.Valid() {
+			roles = append(roles, *roleDescr)
+		} else {
+			msg := "infrastructure role %q is not complete and ignored"
+			c.logger.Warningf(msg, roleDescr)
+
+			return nil, nil
+		}
+
+		if roleDescr.Name == "" {
+			msg := "infrastructure role %q has no name defined and is ignored"
+			c.logger.Warningf(msg, roleDescr.Name)
+			return nil, nil
+		}
+
+		if roleDescr.Password == "" {
+			msg := "infrastructure role %q has no password defined and is ignored"
+			c.logger.Warningf(msg, roleDescr.Name)
+			return nil, nil
+		}
+
+		roles = append(roles, *roleDescr)
+	}
+
+	// Now plot twist. We need to check if there is a configmap with the same
+	// name and extract a role description if it exists.
+	infraRolesMap, err := c.KubeClient.
+		ConfigMaps(rolesSecret.Namespace).
+		Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{})
+	if err == nil {
 		// we have a configmap with username - json description, let's read and decode it
 		for role, s := range infraRolesMap.Data {
 			roleDescr, err := readDecodedRole(s)

@@ -182,20 +386,12 @@ Users:
 			}
 			roleDescr.Name = role
 			roleDescr.Origin = spec.RoleOriginInfrastructure
-			result[role] = *roleDescr
+			roles = append(roles, *roleDescr)
 		}
 	}
 
-	if len(secretData) > 0 {
-		c.logger.Warningf("%d unprocessed entries in the infrastructure roles secret,"+
-			" checking configmap %v", len(secretData), rolesSecret.Name)
-		c.logger.Info(`infrastructure role entries should be in the {key}{id} format,` +
-			` where {key} can be either of "user", "password", "inrole" and the {id}` +
-			` a monotonically increasing integer starting with 1`)
-		c.logger.Debugf("unprocessed entries: %#v", secretData)
-	}
-
-	return result, nil
+	// TODO: check for role collisions
+	return roles, nil
 }
 
 func (c *Controller) podClusterName(pod *v1.Pod) spec.NamespacedName {
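For orientation between the two configuration formats handled above, a sketch (not part of the commit; the secret name "infra-secret" is hypothetical) of what getInfrastructureRoleDefinitions produces:

    // infrastructure_roles_secrets =
    //   "secretname: default/infra-secret, userkey: user, passwordkey: password, rolekey: inrole"
    // parses into a standalone definition:
    def := &config.InfrastructureRole{
    	SecretName:  spec.NamespacedName{Namespace: "default", Name: "infra-secret"},
    	UserKey:     "user",
    	PasswordKey: "password",
    	RoleKey:     "inrole",
    	Template:    false, // each key holds exactly one value
    }

    // the legacy infrastructure_roles_secret_name option instead yields a
    // "template" definition with hardcoded keys, read as user1, password1,
    // inrole1, user2, ... until a userN key is missing:
    legacy := &config.InfrastructureRole{
    	SecretName:  spec.NamespacedName{Namespace: "default", Name: "infra-secret"},
    	UserKey:     "user",
    	PasswordKey: "password",
    	RoleKey:     "inrole",
    	Template:    true,
    }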
@@ -8,20 +8,25 @@ import (
 	b64 "encoding/base64"
 
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 const (
-	testInfrastructureRolesSecretName = "infrastructureroles-test"
+	testInfrastructureRolesOldSecretName = "infrastructureroles-old-test"
+	testInfrastructureRolesNewSecretName = "infrastructureroles-new-test"
 )
 
 func newUtilTestController() *Controller {
 	controller := NewController(&spec.ControllerConfig{}, "util-test")
 	controller.opConfig.ClusterNameLabel = "cluster-name"
 	controller.opConfig.InfrastructureRolesSecretName =
-		spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName}
+		spec.NamespacedName{
+			Namespace: v1.NamespaceDefault,
+			Name:      testInfrastructureRolesOldSecretName,
+		}
 	controller.opConfig.Workers = 4
 	controller.KubeClient = k8sutil.NewMockKubernetesClient()
 	return controller

@@ -80,24 +85,32 @@ func TestClusterWorkerID(t *testing.T) {
 	}
 }
 
-func TestGetInfrastructureRoles(t *testing.T) {
+// Test the functionality of getting infrastructure roles from their
+// description in the corresponding secrets. Here we test only the common
+// paths (e.g. when a secret does not exist, or is empty) and the old format.
+func TestOldInfrastructureRoleFormat(t *testing.T) {
 	var testTable = []struct {
 		secretName     spec.NamespacedName
 		expectedRoles  map[string]spec.PgUser
-		expectedError  error
+		expectedErrors []error
 	}{
 		{
+			// empty secret name
 			spec.NamespacedName{},
 			nil,
 			nil,
 		},
 		{
+			// secret does not exist
 			spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: "null"},
-			nil,
-			fmt.Errorf(`could not get infrastructure roles secret: NotFound`),
+			map[string]spec.PgUser{},
+			[]error{fmt.Errorf(`could not get infrastructure roles secret default/null: NotFound`)},
 		},
 		{
-			spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName},
+			spec.NamespacedName{
+				Namespace: v1.NamespaceDefault,
+				Name:      testInfrastructureRolesOldSecretName,
+			},
 			map[string]spec.PgUser{
 				"testrole": {
 					Name: "testrole",

@@ -116,15 +129,354 @@ func TestOldInfrastructureRoleFormat(t *testing.T) {
 		},
 	}
 	for _, test := range testTable {
-		roles, err := utilTestController.getInfrastructureRoles(&test.secretName)
-		if err != test.expectedError {
-			if err != nil && test.expectedError != nil && err.Error() == test.expectedError.Error() {
+		roles, errors := utilTestController.getInfrastructureRoles(
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName:  test.secretName,
+					UserKey:     "user",
+					PasswordKey: "password",
+					RoleKey:     "inrole",
+					Template:    true,
+				},
+			})
+
+		if len(errors) != len(test.expectedErrors) {
+			t.Errorf("expected error '%v' does not match the actual error '%v'",
+				test.expectedErrors, errors)
+		}
+
+		for idx := range errors {
+			err := errors[idx]
+			expectedErr := test.expectedErrors[idx]
+
+			if err != expectedErr {
+				if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() {
 					continue
 				}
-			t.Errorf("expected error '%v' does not match the actual error '%v'", test.expectedError, err)
+				t.Errorf("expected error '%v' does not match the actual error '%v'",
+					expectedErr, err)
+			}
 		}
 
 		if !reflect.DeepEqual(roles, test.expectedRoles) {
-			t.Errorf("expected roles output %v does not match the actual %v", test.expectedRoles, roles)
+			t.Errorf("expected roles output %#v does not match the actual %#v",
+				test.expectedRoles, roles)
 		}
 	}
 }
+
+// Test the functionality of getting infrastructure roles from their
+// description in the corresponding secrets. Here we test the new format.
+func TestNewInfrastructureRoleFormat(t *testing.T) {
+	var testTable = []struct {
+		secrets        []spec.NamespacedName
+		expectedRoles  map[string]spec.PgUser
+		expectedErrors []error
+	}{
+		// one secret with one configmap
+		{
+			[]spec.NamespacedName{
+				spec.NamespacedName{
+					Namespace: v1.NamespaceDefault,
+					Name:      testInfrastructureRolesNewSecretName,
+				},
+			},
+			map[string]spec.PgUser{
+				"new-test-role": {
+					Name:     "new-test-role",
+					Origin:   spec.RoleOriginInfrastructure,
+					Password: "new-test-password",
+					MemberOf: []string{"new-test-inrole"},
+				},
+				"new-foobar": {
+					Name:     "new-foobar",
+					Origin:   spec.RoleOriginInfrastructure,
+					Password: b64.StdEncoding.EncodeToString([]byte("password")),
+					MemberOf: nil,
+					Flags:    []string{"createdb"},
+				},
+			},
+			nil,
+		},
+		// multiple standalone secrets
+		{
+			[]spec.NamespacedName{
+				spec.NamespacedName{
+					Namespace: v1.NamespaceDefault,
+					Name:      "infrastructureroles-new-test1",
+				},
+				spec.NamespacedName{
+					Namespace: v1.NamespaceDefault,
+					Name:      "infrastructureroles-new-test2",
+				},
+			},
+			map[string]spec.PgUser{
+				"new-test-role1": {
+					Name:     "new-test-role1",
+					Origin:   spec.RoleOriginInfrastructure,
+					Password: "new-test-password1",
+					MemberOf: []string{"new-test-inrole1"},
+				},
+				"new-test-role2": {
+					Name:     "new-test-role2",
+					Origin:   spec.RoleOriginInfrastructure,
+					Password: "new-test-password2",
+					MemberOf: []string{"new-test-inrole2"},
+				},
+			},
+			nil,
+		},
+	}
+	for _, test := range testTable {
+		definitions := []*config.InfrastructureRole{}
+		for _, secret := range test.secrets {
+			definitions = append(definitions, &config.InfrastructureRole{
+				SecretName:  secret,
+				UserKey:     "user",
+				PasswordKey: "password",
+				RoleKey:     "inrole",
+				Template:    false,
+			})
+		}
+
+		roles, errors := utilTestController.getInfrastructureRoles(definitions)
+		if len(errors) != len(test.expectedErrors) {
+			t.Errorf("expected error does not match the actual error:\n%+v\n%+v",
+				test.expectedErrors, errors)
+
+			// Stop and do not do any further checks
+			return
+		}
+
+		for idx := range errors {
+			err := errors[idx]
+			expectedErr := test.expectedErrors[idx]
+
+			if err != expectedErr {
+				if err != nil && expectedErr != nil && err.Error() == expectedErr.Error() {
+					continue
+				}
+				t.Errorf("expected error '%v' does not match the actual error '%v'",
+					expectedErr, err)
+			}
+		}
+
+		if !reflect.DeepEqual(roles, test.expectedRoles) {
+			t.Errorf("expected roles output/the actual:\n%#v\n%#v",
+				test.expectedRoles, roles)
+		}
+	}
+}
+
+// Tests for getting the correct infrastructure role definitions from the
+// present configuration, e.g. in which secrets to look for which roles. The
+// main point here is compatibility between the old and new formats of
+// defining infrastructure roles.
+func TestInfrastructureRoleDefinitions(t *testing.T) {
+	var testTable = []struct {
+		rolesDefs      []*config.InfrastructureRole
+		roleSecretName spec.NamespacedName
+		roleSecrets    string
+		expectedDefs   []*config.InfrastructureRole
+	}{
+		// only new CRD format
+		{
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+			},
+			spec.NamespacedName{},
+			"",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+			},
+		},
+		// only new configmap format
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{},
+			"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+			},
+		},
+		// new configmap format with defaultRoleValue
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{},
+			"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:          "test-user",
+					PasswordKey:      "test-password",
+					DefaultRoleValue: "test-role",
+					Template:         false,
+				},
+			},
+		},
+		// only old CRD and configmap format
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{
+				Namespace: v1.NamespaceDefault,
+				Name:      testInfrastructureRolesOldSecretName,
+			},
+			"",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesOldSecretName,
+					},
+					UserKey:     "user",
+					PasswordKey: "password",
+					RoleKey:     "inrole",
+					Template:    true,
+				},
+			},
+		},
+		// both formats for CRD
+		{
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+			},
+			spec.NamespacedName{
+				Namespace: v1.NamespaceDefault,
+				Name:      testInfrastructureRolesOldSecretName,
+			},
+			"",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesOldSecretName,
+					},
+					UserKey:     "user",
+					PasswordKey: "password",
+					RoleKey:     "inrole",
+					Template:    true,
+				},
+			},
+		},
+		// both formats for configmap
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{
+				Namespace: v1.NamespaceDefault,
+				Name:      testInfrastructureRolesOldSecretName,
+			},
+			"secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
+			[]*config.InfrastructureRole{
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesNewSecretName,
+					},
+					UserKey:     "test-user",
+					PasswordKey: "test-password",
+					RoleKey:     "test-role",
+					Template:    false,
+				},
+				&config.InfrastructureRole{
+					SecretName: spec.NamespacedName{
+						Namespace: v1.NamespaceDefault,
+						Name:      testInfrastructureRolesOldSecretName,
+					},
+					UserKey:     "user",
+					PasswordKey: "password",
+					RoleKey:     "inrole",
+					Template:    true,
+				},
+			},
+		},
+		// incorrect configmap format
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{},
+			"wrong-format",
+			[]*config.InfrastructureRole{},
+		},
+		// configmap without a secret
+		{
+			[]*config.InfrastructureRole{},
+			spec.NamespacedName{},
+			"userkey: test-user, passwordkey: test-password, rolekey: test-role",
+			[]*config.InfrastructureRole{},
+		},
+	}
+
+	for _, test := range testTable {
+		t.Logf("Test: %+v", test)
+		utilTestController.opConfig.InfrastructureRoles = test.rolesDefs
+		utilTestController.opConfig.InfrastructureRolesSecretName = test.roleSecretName
+		utilTestController.opConfig.InfrastructureRolesDefs = test.roleSecrets
+
+		defs := utilTestController.getInfrastructureRoleDefinitions()
+		if len(defs) != len(test.expectedDefs) {
+			t.Errorf("expected definitions do not match the actual:\n%#v\n%#v",
+				test.expectedDefs, defs)
+
+			// Stop and do not do any further checks
+			return
+		}
+
+		for idx := range defs {
+			def := defs[idx]
+			expectedDef := test.expectedDefs[idx]
+
+			if !reflect.DeepEqual(def, expectedDef) {
+				t.Errorf("expected definition/the actual:\n%#v\n%#v",
+					expectedDef, def)
+			}
+		}
+	}
+}
@@ -55,6 +55,10 @@ type PgUser struct {
 	AdminRole string `yaml:"admin_role"`
 }
 
+func (user *PgUser) Valid() bool {
+	return user.Name != "" && user.Password != ""
+}
+
 // PgUserMap maps user names to the definitions.
 type PgUserMap map[string]PgUser

@@ -110,6 +114,8 @@ type ControllerConfig struct {
 	CRDReadyWaitTimeout time.Duration
 	ConfigMapName       NamespacedName
 	Namespace           string
+
+	EnableJsonLogging bool
 }
 
 // cached value for the GetOperatorNamespace
@@ -28,6 +28,8 @@ type Resources struct {
 	PodLabelWaitTimeout     time.Duration `name:"pod_label_wait_timeout" default:"10m"`
 	PodDeletionWaitTimeout  time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
 	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
+	SpiloRunAsUser          *int64        `json:"spilo_runasuser,omitempty"`
+	SpiloRunAsGroup         *int64        `json:"spilo_runasgroup,omitempty"`
 	SpiloFSGroup            *int64        `name:"spilo_fsgroup"`
 	PodPriorityClassName    string        `name:"pod_priority_class_name"`
 	ClusterDomain           string        `name:"cluster_domain" default:"cluster.local"`

@@ -36,6 +38,8 @@ type Resources struct {
 	InheritedLabels         []string          `name:"inherited_labels" default:""`
 	DownscalerAnnotations   []string          `name:"downscaler_annotations"`
 	ClusterNameLabel        string            `name:"cluster_name_label" default:"cluster-name"`
+	DeleteAnnotationDateKey string            `name:"delete_annotation_date_key"`
+	DeleteAnnotationNameKey string            `name:"delete_annotation_name_key"`
 	PodRoleLabel            string            `name:"pod_role_label" default:"spilo-role"`
 	PodToleration           map[string]string `name:"toleration" default:""`
 	DefaultCPURequest       string            `name:"default_cpu_request" default:"100m"`

@@ -45,12 +49,40 @@ type Resources struct {
 	MinCPULimit             string              `name:"min_cpu_limit" default:"250m"`
 	MinMemoryLimit          string              `name:"min_memory_limit" default:"250Mi"`
 	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
+	PodEnvironmentSecret    string              `name:"pod_environment_secret"`
 	NodeReadinessLabel      map[string]string   `name:"node_readiness_label" default:""`
 	MaxInstances            int32               `name:"max_instances" default:"-1"`
 	MinInstances            int32               `name:"min_instances" default:"-1"`
 	ShmVolume               *bool               `name:"enable_shm_volume" default:"true"`
 }
 
+type InfrastructureRole struct {
+	// Name of the secret which describes the role, and optionally the name
+	// of a configmap with extra information
+	SecretName spec.NamespacedName
+
+	UserKey     string
+	PasswordKey string
+	RoleKey     string
+
+	DefaultUserValue string
+	DefaultRoleValue string
+
+	// This field points to the detailed yaml definition of the role, if it exists
+	Details string
+
+	// Specify if a secret contains multiple fields in the following format:
+	//
+	//	%(userkey)idx: ...
+	//	%(passwordkey)idx: ...
+	//	%(rolekey)idx: ...
+	//
+	// If it does, Name/Password/Role are interpreted not as unique field
+	// names, but as a template.
+
+	Template bool
+}
+
 // Auth describes authentication specific configuration parameters
 type Auth struct {
 	SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"`
|
||||||
TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"`
|
TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"`
|
||||||
OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"`
|
OAuthTokenSecretName spec.NamespacedName `name:"oauth_token_secret_name" default:"postgresql-operator"`
|
||||||
InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"`
|
InfrastructureRolesSecretName spec.NamespacedName `name:"infrastructure_roles_secret_name"`
|
||||||
|
InfrastructureRoles []*InfrastructureRole `name:"-"`
|
||||||
|
InfrastructureRolesDefs string `name:"infrastructure_roles_secrets"`
|
||||||
SuperUsername string `name:"super_username" default:"postgres"`
|
SuperUsername string `name:"super_username" default:"postgres"`
|
||||||
ReplicationUsername string `name:"replication_username" default:"standby"`
|
ReplicationUsername string `name:"replication_username" default:"standby"`
|
||||||
}
|
}
|
||||||
|
|
@ -113,8 +147,7 @@ type Config struct {
|
||||||
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
|
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
|
||||||
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
|
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
|
||||||
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"`
|
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"`
|
||||||
// deprecated in favour of SidecarContainers
|
SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
|
||||||
SidecarImages map[string]string `name:"sidecar_docker_images"`
|
|
||||||
SidecarContainers []v1.Container `name:"sidecars"`
|
SidecarContainers []v1.Container `name:"sidecars"`
|
||||||
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
|
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
|
||||||
// value of this string must be valid JSON or YAML; see initPodServiceAccount
|
// value of this string must be valid JSON or YAML; see initPodServiceAccount
|
||||||
|
|
@ -142,8 +175,9 @@ type Config struct {
|
||||||
CustomPodAnnotations map[string]string `name:"custom_pod_annotations"`
|
CustomPodAnnotations map[string]string `name:"custom_pod_annotations"`
|
||||||
EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`
|
EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`
|
||||||
PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
|
PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
|
||||||
// deprecated and kept for backward compatibility
|
StorageResizeMode string `name:"storage_resize_mode" default:"ebs"`
|
||||||
EnableLoadBalancer *bool `name:"enable_load_balancer"`
|
EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility
|
||||||
|
ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"`
|
||||||
MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
|
MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
|
||||||
ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
|
ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
|
||||||
PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
|
PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
|
||||||
|
|
|
||||||
|
|
@ -18,4 +18,5 @@ const (
|
||||||
ReaderRoleNameSuffix = "_reader"
|
ReaderRoleNameSuffix = "_reader"
|
||||||
WriterRoleNameSuffix = "_writer"
|
WriterRoleNameSuffix = "_writer"
|
||||||
UserRoleNameSuffix = "_user"
|
UserRoleNameSuffix = "_user"
|
||||||
|
DefaultSearchPath = "\"$user\""
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -271,33 +271,75 @@ func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason st
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
|
func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
|
||||||
if name != "infrastructureroles-test" {
|
oldFormatSecret := &v1.Secret{}
-		return nil, fmt.Errorf("NotFound")
-	}
-	secret := &v1.Secret{}
-	secret.Name = "testcluster"
-	secret.Data = map[string][]byte{
+	oldFormatSecret := &v1.Secret{}
+	oldFormatSecret.Name = "testcluster"
+	oldFormatSecret.Data = map[string][]byte{
 		"user1":     []byte("testrole"),
 		"password1": []byte("testpassword"),
 		"inrole1":   []byte("testinrole"),
 		"foobar":    []byte(b64.StdEncoding.EncodeToString([]byte("password"))),
 	}
+
+	newFormatSecret := &v1.Secret{}
+	newFormatSecret.Name = "test-secret-new-format"
+	newFormatSecret.Data = map[string][]byte{
+		"user":       []byte("new-test-role"),
+		"password":   []byte("new-test-password"),
+		"inrole":     []byte("new-test-inrole"),
+		"new-foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))),
+	}
+
+	secrets := map[string]*v1.Secret{
+		"infrastructureroles-old-test": oldFormatSecret,
+		"infrastructureroles-new-test": newFormatSecret,
+	}
+
+	for idx := 1; idx <= 2; idx++ {
+		newFormatStandaloneSecret := &v1.Secret{}
+		newFormatStandaloneSecret.Name = fmt.Sprintf("test-secret-new-format%d", idx)
+		newFormatStandaloneSecret.Data = map[string][]byte{
+			"user":     []byte(fmt.Sprintf("new-test-role%d", idx)),
+			"password": []byte(fmt.Sprintf("new-test-password%d", idx)),
+			"inrole":   []byte(fmt.Sprintf("new-test-inrole%d", idx)),
+		}
+		secrets[fmt.Sprintf("infrastructureroles-new-test%d", idx)] = newFormatStandaloneSecret
+	}
+
+	if secret, exists := secrets[name]; exists {
 		return secret, nil
+	}
+
+	return nil, fmt.Errorf("NotFound")
 }

 func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) {
-	if name != "infrastructureroles-test" {
-		return nil, fmt.Errorf("NotFound")
-	}
-	configmap := &v1.ConfigMap{}
-	configmap.Name = "testcluster"
-	configmap.Data = map[string]string{
+	oldFormatConfigmap := &v1.ConfigMap{}
+	oldFormatConfigmap.Name = "testcluster"
+	oldFormatConfigmap.Data = map[string]string{
 		"foobar": "{}",
 	}
+
+	newFormatConfigmap := &v1.ConfigMap{}
+	newFormatConfigmap.Name = "testcluster"
+	newFormatConfigmap.Data = map[string]string{
+		"new-foobar": "{\"user_flags\": [\"createdb\"]}",
+	}
+
+	configmaps := map[string]*v1.ConfigMap{
+		"infrastructureroles-old-test": oldFormatConfigmap,
+		"infrastructureroles-new-test": newFormatConfigmap,
+	}
+
+	if configmap, exists := configmaps[name]; exists {
 		return configmap, nil
+	}
+
+	return nil, fmt.Errorf("NotFound")
 }

 // Secrets to be mocked
 func (mock *MockSecretGetter) Secrets(namespace string) corev1.SecretInterface {
 	return &mockSecret{}
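These fixtures give the reworked mocks something for every lookup path the infrastructure-roles tests exercise: the legacy single-secret format (`user1`/`password1`/`inrole1` keys plus a `foobar` entry that pairs with the `foobar`/`new-foobar` keys in the mock config map) is registered under `infrastructureroles-old-test`, the per-field format under `infrastructureroles-new-test`, and the loop adds two numbered standalone secrets; any other name falls through to the `NotFound` error, mirroring a real `Get` miss.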
@@ -28,6 +28,7 @@ const (
 // an existing roles of another role membership, nor it removes the already assigned flag
 // (except for the NOLOGIN). TODO: process other NOflags, i.e. NOSUPERUSER correctly.
 type DefaultUserSyncStrategy struct {
+	PasswordEncryption string
 }

 // ProduceSyncRequests figures out the types of changes that need to happen with the given users.
@@ -45,7 +46,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
 			}
 		} else {
 			r := spec.PgSyncUserRequest{}
-			newMD5Password := util.PGUserPassword(newUser)
+			newMD5Password := util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(newUser)

 			if dbUser.Password != newMD5Password {
 				r.User.Password = newMD5Password
@@ -113,14 +114,14 @@ func (strategy DefaultUserSyncStrategy) ExecuteSyncRequests(requests []spec.PgSy

 	return nil
 }
-func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) (err error) {
+
+func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql.DB) error {
 	queries := produceAlterRoleSetStmts(user)
 	query := fmt.Sprintf(doBlockStmt, strings.Join(queries, ";"))
-	if _, err = db.Exec(query); err != nil {
-		err = fmt.Errorf("dB error: %v, query: %s", err, query)
-		return
+	if _, err := db.Exec(query); err != nil {
+		return fmt.Errorf("dB error: %v, query: %s", err, query)
 	}
-	return
+	return nil
 }

 func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) error {
@@ -140,7 +141,7 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
 	if user.Password == "" {
 		userPassword = "PASSWORD NULL"
 	} else {
-		userPassword = fmt.Sprintf(passwordTemplate, util.PGUserPassword(user))
+		userPassword = fmt.Sprintf(passwordTemplate, util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(user))
 	}
 	query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword)

@@ -148,6 +149,12 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
 		return fmt.Errorf("dB error: %v, query: %s", err, query)
 	}

+	if len(user.Parameters) > 0 {
+		if err := strategy.alterPgUserSet(user, db); err != nil {
+			return fmt.Errorf("incomplete setup for user %s: %v", user.Name, err)
+		}
+	}
+
 	return nil
 }

@@ -155,7 +162,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
 	var resultStmt []string

 	if user.Password != "" || len(user.Flags) > 0 {
-		alterStmt := produceAlterStmt(user)
+		alterStmt := produceAlterStmt(user, strategy.PasswordEncryption)
 		resultStmt = append(resultStmt, alterStmt)
 	}
 	if len(user.MemberOf) > 0 {
@@ -174,14 +181,14 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
 	return nil
 }

-func produceAlterStmt(user spec.PgUser) string {
+func produceAlterStmt(user spec.PgUser, encryption string) string {
 	// ALTER ROLE ... LOGIN ENCRYPTED PASSWORD ..
 	result := make([]string, 0)
 	password := user.Password
 	flags := user.Flags

 	if password != "" {
-		result = append(result, fmt.Sprintf(passwordTemplate, util.PGUserPassword(user)))
+		result = append(result, fmt.Sprintf(passwordTemplate, util.NewEncryptor(encryption).PGUserPassword(user)))
 	}
 	if len(flags) != 0 {
 		result = append(result, strings.Join(flags, " "))
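The user-sync changes are the mechanical side of the feature: every call site that previously hashed through the package-level util.PGUserPassword now goes through util.NewEncryptor(strategy.PasswordEncryption), alterPgUserSet trades its named return for explicit error returns, and createPgUser now applies the role's Parameters (via alterPgUserSet, which wraps the generated ALTER ROLE ... SET statements in a DO block) immediately after creation, so a freshly created role is never left half-configured.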
@@ -1,8 +1,11 @@
 package util

 import (
+	"crypto/hmac"
 	"crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords
 	cryptoRand "crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
 	"encoding/hex"
 	"fmt"
 	"math/big"
@@ -16,10 +19,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/zalando/postgres-operator/pkg/spec"
+	"golang.org/x/crypto/pbkdf2"
 )

 const (
 	md5prefix         = "md5"
+	scramsha256prefix = "SCRAM-SHA-256"
+	saltlength        = 16
+	iterations        = 4096
 )

 var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
@@ -61,16 +68,62 @@ func NameFromMeta(meta metav1.ObjectMeta) spec.NamespacedName {
 	}
 }

-// PGUserPassword is used to generate md5 password hash for a given user. It does nothing for already hashed passwords.
-func PGUserPassword(user spec.PgUser) string {
-	if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) || user.Password == "" {
+type Hasher func(user spec.PgUser) string
+type Random func(n int) string
+
+type Encryptor struct {
+	encrypt Hasher
+	random  Random
+}
+
+func NewEncryptor(encryption string) *Encryptor {
+	e := Encryptor{random: RandomPassword}
+	m := map[string]Hasher{
+		"md5":           e.PGUserPasswordMD5,
+		"scram-sha-256": e.PGUserPasswordScramSHA256,
+	}
+	hasher, ok := m[encryption]
+	if !ok {
+		hasher = e.PGUserPasswordMD5
+	}
+	e.encrypt = hasher
+	return &e
+}
+
+func (e *Encryptor) PGUserPassword(user spec.PgUser) string {
+	if (len(user.Password) == md5.Size*2+len(md5prefix) && user.Password[:3] == md5prefix) ||
+		(len(user.Password) > len(scramsha256prefix) && user.Password[:len(scramsha256prefix)] == scramsha256prefix) || user.Password == "" {
 		// Avoid processing already encrypted or empty passwords
 		return user.Password
 	}
+	return e.encrypt(user)
+}
+
+func (e *Encryptor) PGUserPasswordMD5(user spec.PgUser) string {
 	s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords.
 	return md5prefix + hex.EncodeToString(s[:])
 }

+func (e *Encryptor) PGUserPasswordScramSHA256(user spec.PgUser) string {
+	salt := []byte(e.random(saltlength))
+	key := pbkdf2.Key([]byte(user.Password), salt, iterations, 32, sha256.New)
+	mac := hmac.New(sha256.New, key)
+	mac.Write([]byte("Server Key"))
+	serverKey := mac.Sum(nil)
+	mac = hmac.New(sha256.New, key)
+	mac.Write([]byte("Client Key"))
+	clientKey := mac.Sum(nil)
+	storedKey := sha256.Sum256(clientKey)
+	pass := fmt.Sprintf("%s$%v:%s$%s:%s",
+		scramsha256prefix,
+		iterations,
+		base64.StdEncoding.EncodeToString(salt),
+		base64.StdEncoding.EncodeToString(storedKey[:]),
+		base64.StdEncoding.EncodeToString(serverKey),
+	)
+	return pass
+}
+
 // Diff returns diffs between 2 objects
 func Diff(a, b interface{}) []string {
 	return pretty.Diff(a, b)
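The string assembled by PGUserPasswordScramSHA256 is the verifier format PostgreSQL stores for SCRAM authentication: SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>, where PBKDF2 stretches the password with the salt, HMAC-SHA-256 of that key yields the server and client keys, and the stored key is the SHA-256 of the client key. A minimal, self-contained sketch of the same derivation, assuming only the standard library plus the golang.org/x/crypto/pbkdf2 dependency this diff adds; buildScramVerifier is a name invented for the sketch, not an operator function:

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"encoding/base64"
		"fmt"

		"golang.org/x/crypto/pbkdf2"
	)

	// buildScramVerifier mirrors PGUserPasswordScramSHA256 above: PBKDF2 stretches
	// the password with the salt, HMAC-SHA-256 of the stretched key derives the
	// server and client keys, and the stored key is SHA-256 of the client key.
	func buildScramVerifier(password, salt string, iterations int) string {
		key := pbkdf2.Key([]byte(password), []byte(salt), iterations, 32, sha256.New)

		mac := hmac.New(sha256.New, key)
		mac.Write([]byte("Server Key"))
		serverKey := mac.Sum(nil)

		mac = hmac.New(sha256.New, key)
		mac.Write([]byte("Client Key"))
		clientKey := mac.Sum(nil)

		storedKey := sha256.Sum256(clientKey)

		return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s",
			iterations,
			base64.StdEncoding.EncodeToString([]byte(salt)),
			base64.StdEncoding.EncodeToString(storedKey[:]),
			base64.StdEncoding.EncodeToString(serverKey),
		)
	}

	func main() {
		fmt.Println(buildScramVerifier("password", "salt", 4096))
	}

Run with the fixed salt "salt" (base64 c2FsdA==), this performs exactly the derivation the deterministic test below pins down, so it should print the SCRAM-SHA-256$4096:c2FsdA==$... value expected for the plain "password" user.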
@@ -13,19 +13,26 @@ import (

 var pgUsers = []struct {
 	in  spec.PgUser
-	out string
+	outmd5         string
+	outscramsha256 string
 }{{spec.PgUser{
 	Name:     "test",
 	Password: "password",
 	Flags:    []string{},
 	MemberOf: []string{}},
-	"md587f77988ccb5aa917c93201ba314fcd4"},
+	"md587f77988ccb5aa917c93201ba314fcd4", "SCRAM-SHA-256$4096:c2FsdA==$lF4cRm/Jky763CN4HtxdHnjV4Q8AWTNlKvGmEFFU8IQ=:ub8OgRsftnk2ccDMOt7ffHXNcikRkQkq1lh4xaAqrSw="},
 	{spec.PgUser{
 		Name:     "test",
 		Password: "md592f413f3974bdf3799bb6fecb5f9f2c6",
 		Flags:    []string{},
 		MemberOf: []string{}},
-		"md592f413f3974bdf3799bb6fecb5f9f2c6"}}
+		"md592f413f3974bdf3799bb6fecb5f9f2c6", "md592f413f3974bdf3799bb6fecb5f9f2c6"},
+	{spec.PgUser{
+		Name:     "test",
+		Password: "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=",
+		Flags:    []string{},
+		MemberOf: []string{}},
+		"SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs=", "SCRAM-SHA-256$4096:S1ByZWhvYVV5VDlJNGZoVw==$ozLevu5k0pAQYRrSY+vZhetO6+/oB+qZvuutOdXR94U=:yADwhy0LGloXzh5RaVwLMFyUokwI17VkHVfKVuHu0Zs="}}

 var prettyDiffTest = []struct {
 	inA interface{}
@@ -107,9 +114,16 @@ func TestNameFromMeta(t *testing.T) {

 func TestPGUserPassword(t *testing.T) {
 	for _, tt := range pgUsers {
-		pwd := PGUserPassword(tt.in)
-		if pwd != tt.out {
-			t.Errorf("PgUserPassword expected: %q, got: %q", tt.out, pwd)
+		e := NewEncryptor("md5")
+		pwd := e.PGUserPassword(tt.in)
+		if pwd != tt.outmd5 {
+			t.Errorf("PgUserPassword expected: %q, got: %q", tt.outmd5, pwd)
+		}
+		e = NewEncryptor("scram-sha-256")
+		e.random = func(n int) string { return "salt" }
+		pwd = e.PGUserPassword(tt.in)
+		if pwd != tt.outscramsha256 {
+			t.Errorf("PgUserPassword expected: %q, got: %q", tt.outscramsha256, pwd)
 		}
 	}
 }
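Overriding e.random with a function that always returns "salt" makes the SCRAM branch deterministic and comparable against the outscramsha256 column; this only works because the test lives in package util, where the unexported random field is reachable. The already-hashed rows (the md5... and SCRAM-SHA-256$... passwords) pass through unchanged in both branches, exercising the early return in PGUserPassword.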
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- deployment.yaml
+- ingress.yaml
+- service.yaml
+- ui-service-account-rbac.yaml
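The flattened view drops the file path, but the resource list (deployment, ingress, service, and the UI service-account RBAC) marks this new file as a kustomization.yaml for the operator UI manifests; assuming a local checkout of that directory, the set can be rendered or applied as one unit with kustomize build <dir> or kubectl apply -k <dir>.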