commit 7e2e0db311
merge with master
@@ -1,3 +1,4 @@
+{{- if eq .Values.configTarget "ConfigMap" }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -13,21 +14,14 @@ data:
   {{- else }}
   {{ .Values.serviceAccount.name }}
   {{- end }}
-  api_port: "{{ .Values.configLoggingRestApi.api_port }}"
-  cluster_history_entries: "{{ .Values.configLoggingRestApi.cluster_history_entries }}"
-  docker_image: {{ .Values.docker_image }}
-  debug_logging: "{{ .Values.configDebug.debug_logging }}"
-  enable_database_access: "{{ .Values.configDebug.enable_database_access }}"
-  enable_shm_volume: "{{ .Values.enable_shm_volume }}"
-  repair_period: {{ .Values.repair_period }}
-  resync_period: {{ .Values.resync_period }}
-  ring_log_lines: "{{ .Values.configLoggingRestApi.ring_log_lines }}"
-  spilo_privileged: "{{ .Values.spilo_privileged }}"
-  workers: "{{ .Values.workers }}"
-{{ toYaml .Values.configMap | indent 2 }}
+{{ toYaml .Values.configGeneral | indent 2 }}
 {{ toYaml .Values.configUsers | indent 2 }}
 {{ toYaml .Values.configKubernetes | indent 2 }}
 {{ toYaml .Values.configTimeouts | indent 2 }}
 {{ toYaml .Values.configLoadBalancer | indent 2 }}
 {{ toYaml .Values.configAwsOrGcp | indent 2 }}
+{{ toYaml .Values.configLogicalBackup | indent 2 }}
+{{ toYaml .Values.configDebug | indent 2 }}
+{{ toYaml .Values.configLoggingRestApi | indent 2 }}
 {{ toYaml .Values.configTeamsApi | indent 2 }}
+{{- end }}

@@ -2,6 +2,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: postgresqls.acid.zalan.do
+  labels:
+    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
+    helm.sh/chart: {{ template "postgres-operator.chart" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
   annotations:
     "helm.sh/hook": crd-install
 spec:
@@ -22,6 +27,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: operatorconfigurations.acid.zalan.do
+  labels:
+    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
+    helm.sh/chart: {{ template "postgres-operator.chart" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
   annotations:
     "helm.sh/hook": crd-install
 spec:

@@ -1,4 +1,4 @@
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   labels:
@@ -16,9 +16,11 @@ spec:
   template:
     metadata:
       annotations:
+      {{- if eq .Values.configTarget "ConfigMap" }}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
-        # In order to use the checksum of CRD OperatorConfiguration instead, use the following line instead
-        # {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }}
+      {{- else }}
+        checksum/config: {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }}
+      {{- end }}
       {{- if .Values.podAnnotations }}
 {{ toYaml .Values.podAnnotations | indent 8 }}
       {{- end }}
@@ -39,11 +41,13 @@ spec:
        image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        env:
+      {{- if eq .Values.configTarget "ConfigMap" }}
        - name: CONFIG_MAP_NAME
          value: {{ template "postgres-operator.fullname" . }}
-        # In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above
-        # - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
-        #   value: {{ template "postgres-operator.fullname" . }}
+      {{- else }}
+        - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
+          value: {{ template "postgres-operator.fullname" . }}
+      {{- end }}
        resources:
 {{ toYaml .Values.resources | indent 10 }}
        {{- if .Values.imagePullSecrets }}

@@ -1,3 +1,4 @@
+{{- if eq .Values.configTarget "OperatorConfigurationCRD" }}
 apiVersion: "acid.zalan.do/v1"
 kind: OperatorConfiguration
 metadata:
@@ -8,26 +9,19 @@ metadata:
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
 configuration:
-  docker_image: {{ .Values.docker_image }}
-  enable_shm_volume: {{ .Values.enable_shm_volume }}
-  repair_period: {{ .Values.repair_period }}
-  resync_period: {{ .Values.resync_period }}
-  workers: {{ .Values.workers }}
-{{ toYaml .Values.configCRD | indent 2 }}
+{{ toYaml .Values.configGeneral | indent 2 }}
   users:
 {{ toYaml .Values.configUsers | indent 4 }}
   kubernetes:
    oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
    pod_service_account_name: operator
-    spilo_privileged: {{ .Values.spilo_privileged }}
 {{ toYaml .Values.configKubernetes | indent 4 }}
-{{ toYaml .Values.configKubernetesCRD | indent 4 }}
   postgres_pod_resources:
 {{ toYaml .Values.configPostgresPodResources | indent 4 }}
   timeouts:
 {{ toYaml .Values.configTimeouts | indent 4 }}
   load_balancer:
-{{ toYaml .Values.configLoadBalancerCRD | indent 4 }}
+{{ toYaml .Values.configLoadBalancer | indent 4 }}
   aws_or_gcp:
 {{ toYaml .Values.configAwsOrGcp | indent 4 }}
   logical_backup:
@@ -35,8 +29,9 @@ configuration:
   debug:
 {{ toYaml .Values.configDebug | indent 4 }}
   teams_api:
-{{ toYaml .Values.configTeamsApiCRD | indent 4 }}
+{{ toYaml .Values.configTeamsApi | indent 4 }}
   logging_rest_api:
 {{ toYaml .Values.configLoggingRestApi | indent 4 }}
   scalyr:
 {{ toYaml .Values.configScalyr | indent 4 }}
+{{- end }}

@@ -0,0 +1,270 @@
+image:
+  registry: registry.opensource.zalan.do
+  repository: acid/postgres-operator
+  tag: v1.1.0-54-g3a914f9
+  pullPolicy: "IfNotPresent"
+
+# Optionally specify an array of imagePullSecrets.
+# Secrets must be manually created in the namespace.
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+# imagePullSecrets:
+#   - name: myRegistryKeySecretName
+
+podAnnotations: {}
+podLabels: {}
+
+configTarget: "OperatorConfigurationCRD"
+
+# general top-level configuration parameters
+configGeneral:
+  # start any new database pod without limitations on shm memory
+  enable_shm_volume: true
+  # etcd connection string for Patroni. Empty uses K8s-native DCS.
+  etcd_host: ""
+  # Spilo docker image
+  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
+  # max number of instances in Postgres cluster. -1 = no limit
+  min_instances: -1
+  # min number of instances in Postgres cluster. -1 = no limit
+  max_instances: -1
+  # period between consecutive repair requests
+  repair_period: 5m
+  # period between consecutive sync requests
+  resync_period: 30m
+  # can prevent certain cases of memory overcommitment
+  # set_memory_request_to_limit: false
+
+  # map of sidecar names to docker images
+  # sidecar_docker_images
+  #   example: "exampleimage:exampletag"
+
+  # number of routines the operator spawns to process requests concurrently
+  workers: 4
+
+# parameters describing Postgres users
+configUsers:
+  # postgres username used for replication between instances
+  replication_username: standby
+  # postgres superuser name to be created by initdb
+  super_username: postgres
+
+configKubernetes:
+  # default DNS domain of K8s cluster where operator is running
+  cluster_domain: cluster.local
+  # additional labels assigned to the cluster objects
+  cluster_labels:
+    application: spilo
+  # label assigned to Kubernetes objects created by the operator
+  cluster_name_label: cluster-name
+  # toggles pod anti affinity on the Postgres pods
+  enable_pod_antiaffinity: false
+  # toggles PDB to set to MinAvailabe 0 or 1
+  enable_pod_disruption_budget: true
+  # name of the secret containing infrastructure roles names and passwords
+  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+
+  # list of labels that can be inherited from the cluster manifest
+  # inherited_labels:
+  # - application
+  # - app
+
+  # timeout for successful migration of master pods from unschedulable node
+  # master_pod_move_timeout: 20m
+
+  # set of labels that a running and active node should possess to be considered ready
+  # node_readiness_label: ""
+
+  # name of the secret containing the OAuth2 token to pass to the teams API
+  # oauth_token_secret_name: postgresql-operator
+
+  # defines the template for PDB (Pod Disruption Budget) names
+  pdb_name_format: "postgres-{cluster}-pdb"
+  # override topology key for pod anti affinity
+  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
+  # name of the ConfigMap with environment variables to populate on every pod
+  # pod_environment_configmap: ""
+
+  # specify the pod management policy of stateful sets of Postgres clusters
+  pod_management_policy: "ordered_ready"
+  # label assigned to the Postgres pods (and services/endpoints)
+  pod_role_label: spilo-role
+  # Postgres pods are terminated forcefully after this timeout
+  pod_terminate_grace_period: 5m
+  # template for database user secrets generated by the operator
+  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # group ID with write-access to volumes (required to run Spilo as non-root process)
+  # spilo_fsgroup: 103
+
+  # whether the Spilo container should run in privileged mode
+  spilo_privileged: false
+  # operator watches for postgres objects in the given namespace
+  watched_namespace: "*" # listen to all namespaces
+
+# configure resource requests for the Postgres pods
+configPostgresPodResources:
+  # CPU limits for the postgres containers
+  default_cpu_limit: "3"
+  # cpu request value for the postgres containers
+  default_cpu_request: 100m
+  # memory limits for the postgres containers
+  default_memory_limit: 1Gi
+  # memory request value for the postgres containers
+  default_memory_request: 100Mi
+
+# timeouts related to some operator actions
+configTimeouts:
+  # timeout when waiting for the Postgres pods to be deleted
+  pod_deletion_wait_timeout: 10m
+  # timeout when waiting for pod role and cluster labels
+  pod_label_wait_timeout: 10m
+  # interval between consecutive attempts waiting for postgresql CRD to be created
+  ready_wait_interval: 3s
+  # timeout for the complete postgres CRD creation
+  ready_wait_timeout: 30s
+  # interval to wait between consecutive attempts to check for some K8s resources
+  resource_check_interval: 3s
+  # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB)
+  resource_check_timeout: 10m
+
+# configure behavior of load balancers
+configLoadBalancer:
+  # DNS zone for cluster DNS name when load balancer is configured for cluster
+  db_hosted_zone: db.example.com
+  # annotations to apply to service when load balancing is enabled
+  # custom_service_annotations:
+  #   keyx: valuez
+  #   keya: valuea
+
+  # toggles service type load balancer pointing to the master pod of the cluster
+  enable_master_load_balancer: true
+  # toggles service type load balancer pointing to the replica pod of the cluster
+  enable_replica_load_balancer: false
+  # defines the DNS name string template for the master load balancer cluster
+  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
+  # defines the DNS name string template for the replica load balancer cluster
+  replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
+
+# options to aid debugging of the operator itself
+configDebug:
+  # toggles verbose debug logs from the operator
+  debug_logging: true
+  # toggles operator functionality that require access to the postgres database
+  enable_database_access: true
+
+# parameters affecting logging and REST API listener
+configLoggingRestApi:
+  # REST API listener listens to this port
+  api_port: 8080
+  # number of entries in the cluster history ring buffer
+  cluster_history_entries: 1000
+  # number of lines in the ring buffer used to store cluster logs
+  ring_log_lines: 100
+
+# configure interaction with non-Kubernetes objects from AWS or GCP
+configAwsOrGcp:
+  # Additional Secret (aws or gcp credentials) to mount in the pod
+  # additional_secret_mount: "some-secret-name"
+
+  # Path to mount the above Secret in the filesystem of the container(s)
+  # additional_secret_mount_path: "/some/dir"
+
+  # AWS region used to store ESB volumes
+  aws_region: eu-central-1
+
+  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
+  # kube_iam_role: ""
+
+  # S3 bucket to use for shipping postgres daily logs
+  # log_s3_bucket: ""
+
+  # S3 bucket to use for shipping WAL segments with WAL-E
+  # wal_s3_bucket: ""
+
+# configure K8s cron job managed by the operator
+configLogicalBackup:
+  # backup schedule in the cron format
+  logical_backup_schedule: "30 00 * * *"
+  # image for pods of the logical backup job (example runs pg_dumpall)
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
+  # S3 bucket to store backup results
+  logical_backup_s3_bucket: "my-bucket-url"
+
+# automate creation of human users with teams API service
+configTeamsApi:
+  # team_admin_role will have the rights to grant roles coming from PG manifests
+  # enable_admin_role_for_users: true
+
+  # toggle to grant superuser to team members created from the Teams API
+  enable_team_superuser: false
+  # toggles usage of the Teams API by the operator
+  enable_teams_api: false
+  # should contain a URL to use for authentication (username and token)
+  # pam_configuration: ""
+
+  # operator will add all team member roles to this group and add a pg_hba line
+  pam_role_name: zalandos
+  # List of teams which members need the superuser role in each Postgres cluster
+  # postgres_superuser_teams: "postgres_superusers"
+
+  # List of roles that cannot be overwritten by an application, team or infrastructure role
+  protected_role_names:
+  - admin
+  # role name to grant to team members created from the Teams API
+  team_admin_role: admin
+  # postgres config parameters to apply to each team member role
+  team_api_role_configuration:
+    log_statement: all
+  # URL of the Teams API service
+  # teams_api_url: http://fake-teams-api.default.svc.cluster.local
+
+# Scalyr is a log management tool that Zalando uses as a sidecar
+scalyr:
+  # API key for the Scalyr sidecar
+  # scalyr_api_key: ""
+
+  # Docker image for the Scalyr sidecar
+  # scalyr_image: ""
+
+  # CPU limit value for the Scalyr sidecar
+  scalyr_cpu_limit: "1"
+  # CPU rquest value for the Scalyr sidecar
+  scalyr_cpu_request: 100m
+  # Memory limit value for the Scalyr sidecar
+  scalyr_memory_limit: 1Gi
+  # Memory request value for the Scalyr sidecar
+  scalyr_memory_request: 50Mi
+
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: true
+
+serviceAccount:
+  # Specifies whether a ServiceAccount should be created
+  create: true
+  # The name of the ServiceAccount to use.
+  # If not set and create is true, a name is generated using the fullname template
+  # When relying solely on the OperatorConfiguration CRD, set this value to "operator"
+  # Otherwise, the operator tries to use the "default" service account which is forbidden
+  name: "operator"
+
+priorityClassName: ""
+
+resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 300Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 300Mi
+
+# Affinity for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+# Tolerations for pod assignment
+# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+# Node labels for pod assignment
+# Ref: https://kubernetes.io/docs/user-guide/node-selection/
+nodeSelector: {}

@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.1.0-28-g24d412a
+  tag: v1.1.0-54-g3a914f9
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -13,148 +13,208 @@ image:
 podAnnotations: {}
 podLabels: {}
 
-# config shared from ConfigMap and CRD
-docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7
-enable_shm_volume: true
-repair_period: 5m
-resync_period: 5m
-spilo_privileged: false
-workers: 4
+configTarget: "ConfigMap"
 
+# general configuration parameters
+configGeneral:
+  # start any new database pod without limitations on shm memory
+  enable_shm_volume: "true"
+  # etcd connection string for Patroni. Empty uses K8s-native DCS.
+  etcd_host: ""
+  # Spilo docker image
+  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
+  # max number of instances in Postgres cluster. -1 = no limit
+  min_instances: "-1"
+  # min number of instances in Postgres cluster. -1 = no limit
+  max_instances: "-1"
+  # period between consecutive repair requests
+  repair_period: 5m
+  # period between consecutive sync requests
+  resync_period: 30m
+  # can prevent certain cases of memory overcommitment
+  # set_memory_request_to_limit: "false"
+
+  # map of sidecar names to docker images
+  # sidecar_docker_images: ""
+
+  # number of routines the operator spawns to process requests concurrently
+  workers: "4"
+
+# parameters describing Postgres users
 configUsers:
+  # postgres username used for replication between instances
   replication_username: standby
+  # postgres superuser name to be created by initdb
   super_username: postgres
 
 configKubernetes:
+  # default DNS domain of K8s cluster where operator is running
   cluster_domain: cluster.local
-  # inherited_labels: ""
-  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  # node_readiness_label: ""
-  # oauth_token_secret_name: postgresql-operator
-  # pod_environment_configmap: ""
-  # spilo_fsgroup: "103"
-  pod_management_policy: "ordered_ready"
-  pdb_name_format: "postgres-{cluster}-pdb"
-  pod_role_label: spilo-role
-  pod_terminate_grace_period: 5m
-  secret_name_template: '{username}.{cluster}.credentials'
-
-configPostgresPodResources:
-  default_cpu_request: 100m
-  default_memory_request: 100Mi
-  default_cpu_limit: "3"
-  default_memory_limit: 1Gi
-  # set_memory_request_to_limit: true
-
-configTimeouts:
-  # master_pod_move_timeout: 10m
-  pod_deletion_wait_timeout: 10m
-  pod_label_wait_timeout: 10m
-  ready_wait_interval: 3s
-  ready_wait_timeout: 30s
-  resource_check_interval: 3s
-  resource_check_timeout: 10m
-
-configDebug:
-  debug_logging: true
-  enable_database_access: true
-
-configLoggingRestApi:
-  api_port: 8080
-  cluster_history_entries: 1000
-  ring_log_lines: 100
-
-configAwsOrGcp:
-  aws_region: eu-central-1
-  db_hosted_zone: db.example.com
-  # kube_iam_role: ""
-  # log_s3_bucket: ""
-  # wal_s3_bucket: ""
-  # additional_secret_mount: "some-secret-name"
-  # additional_secret_mount_path: "/some/dir"
-
-configLogicalBackup:
-  logical_backup_schedule: "30 00 * * *"
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
-  logical_backup_s3_bucket: ""
-
-# config exclusive to ConfigMap
-configMap:
+  # additional labels assigned to the cluster objects
   cluster_labels: application:spilo
+  # label assigned to Kubernetes objects created by the operator
   cluster_name_label: version
+  # toggles pod anti affinity on the Postgres pods
+  enable_pod_antiaffinity: "false"
+  # toggles PDB to set to MinAvailabe 0 or 1
+  enable_pod_disruption_budget: "true"
+  # name of the secret containing infrastructure roles names and passwords
+  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+
+  # list of labels that can be inherited from the cluster manifest
+  # inherited_labels: ""
+
+  # timeout for successful migration of master pods from unschedulable node
+  # master_pod_move_timeout: 20m
+
+  # set of labels that a running and active node should possess to be considered ready
+  # node_readiness_label: ""
+
+  # name of the secret containing the OAuth2 token to pass to the teams API
+  # oauth_token_secret_name: postgresql-operator
+
+  # defines the template for PDB (Pod Disruption Budget) names
+  pdb_name_format: "postgres-{cluster}-pdb"
+  # override topology key for pod anti affinity
+  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
+  # name of the ConfigMap with environment variables to populate on every pod
+  # pod_environment_configmap: ""
+
+  # specify the pod management policy of stateful sets of Postgres clusters
+  pod_management_policy: "ordered_ready"
+  # label assigned to the Postgres pods (and services/endpoints)
+  pod_role_label: spilo-role
+  # Postgres pods are terminated forcefully after this timeout
+  pod_terminate_grace_period: 5m
+  # template for database user secrets generated by the operator
+  secret_name_template: '{username}.{cluster}.credentials'
+  # group ID with write-access to volumes (required to run Spilo as non-root process)
+  # spilo_fsgroup: "103"
+
+  # whether the Spilo container should run in privileged mode
+  spilo_privileged: "false"
+  # operator watches for postgres objects in the given namespace
   watched_namespace: "*" # listen to all namespaces
+
+# configure resource requests for the Postgres pods
+configPostgresPodResources:
+  # CPU limits for the postgres containers
+  default_cpu_limit: "3"
+  # cpu request value for the postgres containers
+  default_cpu_request: 100m
+  # memory limits for the postgres containers
+  default_memory_limit: 1Gi
+  # memory request value for the postgres containers
+  default_memory_request: 100Mi
+
+# timeouts related to some operator actions
+configTimeouts:
+  # timeout when waiting for the Postgres pods to be deleted
+  pod_deletion_wait_timeout: 10m
+  # timeout when waiting for pod role and cluster labels
+  pod_label_wait_timeout: 10m
+  # interval between consecutive attempts waiting for postgresql CRD to be created
+  ready_wait_interval: 3s
+  # timeout for the complete postgres CRD creation
+  ready_wait_timeout: 30s
+  # interval to wait between consecutive attempts to check for some K8s resources
+  resource_check_interval: 3s
+  # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB)
+  resource_check_timeout: 10m
+
+# configure behavior of load balancers
 configLoadBalancer:
+  # DNS zone for cluster DNS name when load balancer is configured for cluster
+  db_hosted_zone: db.example.com
+  # annotations to apply to service when load balancing is enabled
   # custom_service_annotations:
   #   "keyx:valuez,keya:valuea"
+
+  # toggles service type load balancer pointing to the master pod of the cluster
   enable_master_load_balancer: "true"
+  # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: "false"
+  # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
+  # defines the DNS name string template for the replica load balancer cluster
   replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
+
+# options to aid debugging of the operator itself
+configDebug:
+  # toggles verbose debug logs from the operator
+  debug_logging: "true"
+  # toggles operator functionality that require access to the postgres database
+  enable_database_access: "true"
+
+# parameters affecting logging and REST API listener
+configLoggingRestApi:
+  # REST API listener listens to this port
+  api_port: "8080"
+  # number of entries in the cluster history ring buffer
+  cluster_history_entries: "1000"
+  # number of lines in the ring buffer used to store cluster logs
+  ring_log_lines: "100"
+
+# configure interaction with non-Kubernetes objects from AWS or GCP
+configAwsOrGcp:
+  # Additional Secret (aws or gcp credentials) to mount in the pod
+  # additional_secret_mount: "some-secret-name"
+
+  # Path to mount the above Secret in the filesystem of the container(s)
+  # additional_secret_mount_path: "/some/dir"
+
+  # AWS region used to store ESB volumes
+  aws_region: eu-central-1
+
+  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
+  # kube_iam_role: ""
+
+  # S3 bucket to use for shipping postgres daily logs
+  # log_s3_bucket: ""
+
+  # S3 bucket to use for shipping WAL segments with WAL-E
+  # wal_s3_bucket: ""
+
+# configure K8s cron job managed by the operator
+configLogicalBackup:
+  # backup schedule in the cron format
+  logical_backup_schedule: "30 00 * * *"
+  # image for pods of the logical backup job (example runs pg_dumpall)
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
+  # S3 bucket to store backup results
+  logical_backup_s3_bucket: "my-bucket-url"
+
+# automate creation of human users with teams API service
 configTeamsApi:
-  enable_teams_api: "false"
+  # team_admin_role will have the rights to grant roles coming from PG manifests
   # enable_admin_role_for_users: "true"
 
+  # toggle to grant superuser to team members created from the Teams API
   # enable_team_superuser: "false"
 
+  # toggles usage of the Teams API by the operator
+  enable_teams_api: "false"
+  # should contain a URL to use for authentication (username and token)
   # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
 
+  # operator will add all team member roles to this group and add a pg_hba line
   # pam_role_name: zalandos
 
+  # List of teams which members need the superuser role in each Postgres cluster
   # postgres_superuser_teams: "postgres_superusers"
 
+  # List of roles that cannot be overwritten by an application, team or infrastructure role
+  # protected_role_names: "admin"
+
+  # role name to grant to team members created from the Teams API
   # team_admin_role: "admin"
 
+  # postgres config parameters to apply to each team member role
   # team_api_role_configuration: "log_statement:all"
 
+  # URL of the Teams API service
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local
 
-# config exclusive to CRD
-configCRD:
-  etcd_host: ""
-  min_instances: -1
-  max_instances: -1
-  # sidecar_docker_images
-  #   example: "exampleimage:exampletag"
-
-configKubernetesCRD:
-  cluster_labels:
-    application: spilo
-  cluster_name_label: cluster-name
-  enable_pod_antiaffinity: false
-  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
-  enable_pod_disruption_budget: true
-  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
-  # inherited_labels:
-  # - application
-  # - app
-  # watched_namespace: ""
-
-configLoadBalancerCRD:
-  # custom_service_annotations:
-  #   keyx: valuez
-  #   keya: valuea
-  enable_master_load_balancer: false
-  enable_replica_load_balancer: false
-  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
-  replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
-
-configTeamsApiCRD:
-  enable_teams_api: false
-  enable_team_superuser: false
-  # pam_configuration: ""
-  pam_role_name: zalandos
-  # postgres_superuser_teams: "postgres_superusers"
-  protected_role_names:
-  - admin
-  team_admin_role: admin
-  team_api_role_configuration:
-    log_statement: all
-  # teams_api_url: ""
-
-scalyr:
-  scalyr_cpu_request: 100m
-  scalyr_memory_request: 50Mi
-  scalyr_cpu_limit: "1"
-  scalyr_memory_limit: 1Gi
-  # scalyr_api_key: ""
-  # scalyr_image: ""
-  # scalyr_server_url: ""
-
 rbac:
   # Specifies whether RBAC resources should be created
   create: true

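A short note on the `configTarget` switch introduced above: the chart now ships two values files, and the same templates render either the ConfigMap or the OperatorConfiguration CRD depending on this single value. As a minimal sketch of an override (the file name my-values.yaml is only illustrative and not part of this commit), selecting the CRD-based configuration could look like this:

# my-values.yaml -- hypothetical override file passed to helm with -f
# Render the OperatorConfiguration CRD instead of the default ConfigMap.
configTarget: "OperatorConfigurationCRD"

# With the CRD target the operator must not fall back to the "default"
# service account, so pin the name as the values file comments recommend.
serviceAccount:
  create: true
  name: "operator"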
@@ -115,6 +115,16 @@ Those are top-level keys, containing both leaf keys and groups.
 * **repair_period**
   period between consecutive repair requests. The default is `5m`.
 
+* **set_memory_request_to_limit**
+  Set `memory_request` to `memory_limit` for all Postgres clusters (the default
+  value is also increased). This prevents certain cases of memory overcommitment
+  at the cost of overprovisioning memory and potential scheduling problems for
+  containers with high memory limits due to the lack of memory on Kubernetes
+  cluster nodes. This affects all containers created by the operator (Postgres,
+  Scalyr sidecar, and other sidecars); to set resources for the operator's own
+  container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
+  The default is `false`.
+
 ## Postgres users
 
 Parameters describing Postgres users. In a CRD-configuration, they are grouped
@@ -297,16 +307,6 @@ CRD-based configuration.
   memory limits for the Postgres containers, unless overridden by cluster-specific
   settings. The default is `1Gi`.
 
-* **set_memory_request_to_limit**
-  Set `memory_request` to `memory_limit` for all Postgres clusters (the default
-  value is also increased). This prevents certain cases of memory overcommitment
-  at the cost of overprovisioning memory and potential scheduling problems for
-  containers with high memory limits due to the lack of memory on Kubernetes
-  cluster nodes. This affects all containers created by the operator (Postgres,
-  Scalyr sidecar, and other sidecars); to set resources for the operator's own
-  container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
-  The default is `false`.
-
 ## Operator timeouts
 
 This set of parameters define various timeouts related to some operator
@@ -501,7 +501,7 @@ key.
   `https://info.example.com/oauth2/tokeninfo?access_token= uid
   realm=/employees`.
 
-* **protected_roles**
+* **protected_role_names**
   List of roles that cannot be overwritten by an application, team or
   infrastructure role. The default is `admin`.

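For the `set_memory_request_to_limit` option documented above, a minimal sketch of enabling it through the CRD-oriented values file from this commit (values-crd.yaml keeps it commented out with a default of false) would simply be:

configGeneral:
  set_memory_request_to_limit: true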
@@ -1,9 +1,9 @@
 apiVersion: "acid.zalan.do/v1"
 kind: postgresql
 
 metadata:
   name: acid-test-cluster
 spec:
+  dockerImage: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
   initContainers:
   - name: date
     image: busybox
@@ -11,9 +11,9 @@ spec:
   teamId: "ACID"
   volume:
     size: 1Gi
-    #storageClass: my-sc
+    # storageClass: my-sc
   numberOfInstances: 2
-  users: #Application/Robot users
+  users: # Application/Robot users
     zalando:
     - superuser
    - createdb
@@ -23,8 +23,11 @@ spec:
    - 127.0.0.1/32
   databases:
     foo: zalando
-  #Expert section
+
+  # Expert section
+
   enableShmVolume: true
+  # spiloFSGroup: 103
   postgresql:
     version: "10"
     parameters:
@@ -38,7 +41,6 @@ spec:
     limits:
       cpu: 300m
      memory: 300Mi
-  # spiloFSGroup: 103
   patroni:
     initdb:
       encoding: "UTF8"
@@ -47,42 +49,42 @@ spec:
     pg_hba:
     - hostssl all all 0.0.0.0/0 md5
    - host all all 0.0.0.0/0 md5
-    #slots:
+    # slots:
     # permanent_physical_1:
     # type: physical
     # permanent_logical_1:
     # type: logical
     # database: foo
     # plugin: pgoutput
     ttl: 30
     loop_wait: &loop_wait 10
     retry_timeout: 10
     maximum_lag_on_failover: 33554432
     # restore a Postgres DB with point-in-time-recovery
     # with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp
     # with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup
     # clone:
     # uid: "efd12e58-5786-11e8-b5a7-06148230260c"
     # cluster: "acid-batman"
     # timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
     # s3_wal_path: "s3://custom/path/to/bucket"
 
   # run periodic backups with k8s cron jobs
   # enableLogicalBackup: true
   # logicalBackupSchedule: "30 00 * * *"
   maintenanceWindows:
   - 01:00-06:00 #UTC
   - Sat:00:00-04:00
-  #sidecars:
+  # sidecars:
   # - name: "telegraf-sidecar"
   # image: "telegraf:latest"
   # resources:
   # limits:
   # cpu: 500m
   # memory: 500Mi
   # requests:
   # cpu: 100m
   # memory: 100Mi
   # env:
   # - name: "USEFUL_VAR"
   # value: "perhaps-true"

@@ -3,62 +3,78 @@ kind: ConfigMap
 metadata:
   name: postgres-operator
 data:
-  watched_namespace: "*" # listen to all namespaces
-  cluster_labels: application:spilo
-  cluster_name_label: version
-  pod_role_label: spilo-role
-
-  debug_logging: "true"
-  workers: "4"
-  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7
-  pod_service_account_name: "zalando-postgres-operator"
-  secret_name_template: '{username}.{cluster}.credentials'
-  cluster_domain: cluster.local
-  super_username: postgres
-  enable_teams_api: "false"
-  spilo_privileged: "false"
-  # enable_shm_volume: "true"
-  # custom_service_annotations:
-  #   "keyx:valuez,keya:valuea"
-  # set_memory_request_to_limit: "true"
-  # postgres_superuser_teams: "postgres_superusers"
-  # enable_team_superuser: "false"
-  # team_admin_role: "admin"
-  # enable_admin_role_for_users: "true"
-  # teams_api_url: http://fake-teams-api.default.svc.cluster.local
-  # team_api_role_configuration: "log_statement:all"
-  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
-  # oauth_token_secret_name: postgresql-operator
-  # pam_role_name: zalandos
-  # pam_configuration: |
-  #  https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
-  # inherited_labels: ""
-  aws_region: eu-central-1
   # additional_secret_mount: "some-secret-name"
   # additional_secret_mount_path: "/some/dir"
-  db_hosted_zone: db.example.com
-  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
-  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
-  enable_master_load_balancer: "true"
-  enable_replica_load_balancer: "false"
-
-  pdb_name_format: "postgres-{cluster}-pdb"
-
   api_port: "8080"
-  ring_log_lines: "100"
+  aws_region: eu-central-1
+  cluster_domain: cluster.local
   cluster_history_entries: "1000"
-  pod_terminate_grace_period: 5m
+  cluster_labels: application:spilo
+  cluster_name_label: version
+  # custom_service_annotations:
+  #   "keyx:valuez,keya:valuea"
+  db_hosted_zone: db.example.com
+  debug_logging: "true"
+  # default_cpu_limit: "3"
+  # default_cpu_request: 100m
+  # default_memory_limit: 1Gi
+  # default_memory_request: 100Mi
+  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
+  # enable_admin_role_for_users: "true"
+  # enable_database_access: "true"
+  enable_master_load_balancer: "true"
+  # enable_pod_antiaffinity: "false"
+  # enable_pod_disruption_budget: "true"
+  enable_replica_load_balancer: "false"
+  # enable_shm_volume: "true"
+  # enable_team_superuser: "false"
+  enable_teams_api: "false"
+  # etcd_host: ""
+  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
+  # inherited_labels: ""
+  # kube_iam_role: ""
+  # log_s3_bucket: ""
+  # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
+  # logical_backup_s3_bucket: "my-bucket-url"
+  # logical_backup_schedule: "30 00 * * *"
+  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
+  # master_pod_move_timeout: 10m
+  # max_instances: "-1"
+  # min_instances: "-1"
+  # node_readiness_label: ""
+  # oauth_token_secret_name: postgresql-operator
+  # pam_configuration: |
+  #  https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
+  # pam_role_name: zalandos
+  pdb_name_format: "postgres-{cluster}-pdb"
+  # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   pod_deletion_wait_timeout: 10m
+  # pod_environment_configmap: ""
   pod_label_wait_timeout: 10m
   pod_management_policy: "ordered_ready"
+  pod_role_label: spilo-role
+  pod_service_account_name: "zalando-postgres-operator"
+  pod_terminate_grace_period: 5m
+  # postgres_superuser_teams: "postgres_superusers"
+  # protected_role_names: "admin"
   ready_wait_interval: 3s
   ready_wait_timeout: 30s
-  # master_pod_move_timeout: 10m
+  repair_period: 5m
+  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
   replication_username: standby
   resource_check_interval: 3s
   resource_check_timeout: 10m
   resync_period: 5m
+  ring_log_lines: "100"
-  # logical_backup_schedule: "30 00 * * *"
+  secret_name_template: '{username}.{cluster}.credentials'
-  # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
+  # sidecar_docker_images: ""
-  # logical_backup_s3_bucket: ""
+  # set_memory_request_to_limit: "false"
+  spilo_privileged: "false"
+  super_username: postgres
+  # team_admin_role: "admin"
+  # team_api_role_configuration: "log_statement:all"
+  # teams_api_url: http://fake-teams-api.default.svc.cluster.local
+  # toleration: ""
+  # wal_s3_bucket: ""
+  watched_namespace: "*" # listen to all namespaces
+  workers: "4"

@@ -9,16 +9,11 @@ spec:
     size: 1Gi
   numberOfInstances: 2
   users:
-    # database owner
-    zalando:
+    zalando: # database owner
     - superuser
     - createdb
+    foo_user: [] # role for application foo
-
-    # role for application foo
-    foo_user: []
-
-  #databases: name->owner
   databases:
-    foo: zalando
+    foo: zalando # dbname: owner
   postgresql:
     version: "10"

@ -4,57 +4,58 @@ metadata:
|
||||||
name: postgresql-operator-default-configuration
|
name: postgresql-operator-default-configuration
|
||||||
configuration:
|
configuration:
|
||||||
etcd_host: ""
|
etcd_host: ""
|
||||||
docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7
|
docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
|
||||||
workers: 4
|
# enable_shm_volume: true
|
||||||
min_instances: -1
|
|
||||||
max_instances: -1
|
max_instances: -1
|
||||||
|
min_instances: -1
|
||||||
resync_period: 30m
|
resync_period: 30m
|
||||||
repair_period: 5m
|
repair_period: 5m
|
||||||
# enable_shm_volume: true
|
# set_memory_request_to_limit: false
|
||||||
|
# sidecar_docker_images:
|
||||||
#sidecar_docker_images:
|
|
||||||
# example: "exampleimage:exampletag"
|
# example: "exampleimage:exampletag"
|
||||||
|
workers: 4
|
||||||
users:
|
users:
|
||||||
super_username: postgres
|
|
||||||
replication_username: standby
|
replication_username: standby
|
||||||
|
super_username: postgres
|
||||||
kubernetes:
|
kubernetes:
|
||||||
pod_service_account_name: operator
|
|
||||||
pod_terminate_grace_period: 5m
|
|
||||||
pdb_name_format: "postgres-{cluster}-pdb"
|
|
||||||
enable_pod_disruption_budget: true
|
|
||||||
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
|
||||||
cluster_domain: cluster.local
|
cluster_domain: cluster.local
|
||||||
oauth_token_secret_name: postgresql-operator
|
|
||||||
pod_role_label: spilo-role
|
|
||||||
# spilo_fsgroup: 103
|
|
||||||
spilo_privileged: false
|
|
||||||
cluster_labels:
|
cluster_labels:
|
||||||
application: spilo
|
application: spilo
|
||||||
|
cluster_name_label: cluster-name
|
||||||
|
enable_pod_antiaffinity: false
|
||||||
|
enable_pod_disruption_budget: true
|
||||||
|
# infrastructure_roles_secret_name: ""
|
||||||
# inherited_labels:
|
# inherited_labels:
|
||||||
# - application
|
# - application
|
||||||
# - app
|
# - app
|
||||||
cluster_name_label: cluster-name
|
|
||||||
# watched_namespace:""
|
|
||||||
# node_readiness_label: ""
|
# node_readiness_label: ""
|
||||||
# toleration: {}
|
oauth_token_secret_name: postgresql-operator
|
||||||
# infrastructure_roles_secret_name: ""
|
pdb_name_format: "postgres-{cluster}-pdb"
|
||||||
|
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
|
||||||
# pod_environment_configmap: ""
|
# pod_environment_configmap: ""
|
||||||
pod_management_policy: "ordered_ready"
|
pod_management_policy: "ordered_ready"
|
||||||
enable_pod_antiaffinity: false
|
pod_role_label: spilo-role
|
||||||
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
|
pod_service_account_name: operator
|
||||||
|
pod_terminate_grace_period: 5m
|
||||||
|
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||||
|
# spilo_fsgroup: 103
|
||||||
|
spilo_privileged: false
|
||||||
|
# toleration: {}
|
||||||
|
# watched_namespace:""
|
||||||
postgres_pod_resources:
|
postgres_pod_resources:
|
||||||
default_cpu_request: 100m
|
|
||||||
default_memory_request: 100Mi
|
|
||||||
default_cpu_limit: "3"
|
default_cpu_limit: "3"
|
||||||
|
default_cpu_request: 100m
|
||||||
default_memory_limit: 1Gi
|
default_memory_limit: 1Gi
|
||||||
|
default_memory_request: 100Mi
|
||||||
timeouts:
|
timeouts:
|
||||||
resource_check_interval: 3s
|
|
||||||
resource_check_timeout: 10m
|
|
||||||
pod_label_wait_timeout: 10m
|
pod_label_wait_timeout: 10m
|
||||||
pod_deletion_wait_timeout: 10m
|
pod_deletion_wait_timeout: 10m
|
||||||
ready_wait_interval: 4s
|
ready_wait_interval: 4s
|
||||||
ready_wait_timeout: 30s
|
ready_wait_timeout: 30s
|
||||||
|
resource_check_interval: 3s
|
||||||
|
resource_check_timeout: 10m
|
||||||
load_balancer:
|
load_balancer:
|
||||||
|
# db_hosted_zone: ""
|
||||||
enable_master_load_balancer: false
|
enable_master_load_balancer: false
|
||||||
enable_replica_load_balancer: false
|
enable_replica_load_balancer: false
|
||||||
# custom_service_annotations:
|
# custom_service_annotations:
|
||||||
|
|
@@ -63,41 +64,41 @@ configuration:
     master_dns_name_format: "{cluster}.{team}.{hostedzone}"
     replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
   aws_or_gcp:
-    # db_hosted_zone: ""
-    # wal_s3_bucket: ""
-    # log_s3_bucket: ""
-    # kube_iam_role: ""
-    aws_region: eu-central-1
     # additional_secret_mount: "some-secret-name"
     # additional_secret_mount_path: "/some/dir"
+    aws_region: eu-central-1
+    # kube_iam_role: ""
+    # log_s3_bucket: ""
+    # wal_s3_bucket: ""
+  logical_backup:
+    logical_backup_schedule: "30 00 * * *"
+    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
+    logical_backup_s3_bucket: "my-bucket-url"
   debug:
     debug_logging: true
     enable_database_access: true
   teams_api:
-    enable_teams_api: false
-    team_api_role_configuration:
-      log_statement: all
+    # enable_admin_role_for_users: true
     enable_team_superuser: false
-    team_admin_role: admin
-    pam_role_name: zalandos
+    enable_teams_api: false
     # pam_configuration: ""
+    pam_role_name: zalandos
+    # postgres_superuser_teams: "postgres_superusers"
     protected_role_names:
     - admin
+    team_admin_role: admin
+    team_api_role_configuration:
+      log_statement: all
     # teams_api_url: ""
-    # postgres_superuser_teams: "postgres_superusers"
   logging_rest_api:
     api_port: 8008
-    ring_log_lines: 100
     cluster_history_entries: 1000
+    ring_log_lines: 100
   scalyr:
-    scalyr_cpu_request: 100m
-    scalyr_memory_request: 50Mi
-    scalyr_cpu_limit: "1"
-    scalyr_memory_limit: 1Gi
     # scalyr_api_key: ""
+    scalyr_cpu_limit: "1"
+    scalyr_cpu_request: 100m
     # scalyr_image: ""
+    scalyr_memory_limit: 1Gi
+    scalyr_memory_request: 50Mi
     # scalyr_server_url: ""
-  logical_backup:
-    logical_backup_schedule: "30 00 * * *"
-    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
-    logical_backup_s3_bucket: ""
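For orientation, each top-level group in this example corresponds to a struct in the operator's configuration API, matched up through json struct tags rather than by key order (which is why the reordering above is purely cosmetic). A minimal sketch of the logical_backup group, assuming a dedicated struct for it: the field names Schedule, DockerImage and S3Bucket match the fromCRD.LogicalBackup accesses further down, while the type name used here is only illustrative.

    // Sketch only: the Go struct the logical_backup section above would unmarshal into.
    // The type name is assumed; the json tags mirror the YAML keys shown in the diff.
    type OperatorLogicalBackupConfiguration struct {
    	Schedule    string `json:"logical_backup_schedule,omitempty"`
    	DockerImage string `json:"logical_backup_docker_image,omitempty"`
    	S3Bucket    string `json:"logical_backup_s3_bucket,omitempty"`
    }

With that mapping, logical_backup_schedule: "30 00 * * *" lands in Schedule and is later copied into the operator's runtime config (see the importConfigurationFromCRD hunks below).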
@@ -11,7 +11,7 @@ spec:
   numberOfInstances: 1
   postgresql:
     version: "10"
   # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
   standby:
     s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"
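The standby section of this manifest deserializes into the StandbyCluster field of PostgresSpec, typed *StandbyDescription in the type diff further down. A minimal sketch, assuming StandbyDescription carries only the one key visible in the manifest:

    // Sketch only: assumed shape of StandbyDescription; s3_wal_path is the only
    // key shown in the example manifest above.
    type StandbyDescription struct {
    	S3WALPath string `json:"s3_wal_path,omitempty"`
    }

Because StandbyCluster is a pointer, the operator can presumably distinguish "no standby section" (nil) from "standby section present" when deciding whether to bootstrap the cluster as a standby.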
@@ -121,6 +121,7 @@ type TeamsAPIConfiguration struct {
 	TeamsAPIUrl              string            `json:"teams_api_url,omitempty"`
 	TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"`
 	EnableTeamSuperuser      bool              `json:"enable_team_superuser,omitempty"`
+	EnableAdminRoleForUsers  bool              `json:"enable_admin_role_for_users,omitempty"`
 	TeamAdminRole            string            `json:"team_admin_role,omitempty"`
 	PamRoleName              string            `json:"pam_role_name,omitempty"`
 	PamConfiguration         string            `json:"pam_configuration,omitempty"`
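The new EnableAdminRoleForUsers field becomes settable from the teams_api section of the OperatorConfiguration CR (Kubernetes converts the YAML to JSON before decoding). A self-contained sketch of the tag-to-field mapping, with the struct trimmed to two fields for brevity:

    // Sketch only: shows how enable_admin_role_for_users reaches the new field.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Trimmed copy of TeamsAPIConfiguration, keeping just the fields used here.
    type TeamsAPIConfiguration struct {
    	EnableTeamsAPI          bool `json:"enable_teams_api,omitempty"`
    	EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"`
    }

    func main() {
    	raw := []byte(`{"enable_teams_api": false, "enable_admin_role_for_users": true}`)
    	var cfg TeamsAPIConfiguration
    	if err := json.Unmarshal(raw, &cfg); err != nil {
    		panic(err)
    	}
    	fmt.Println(cfg.EnableAdminRoleForUsers) // prints: true
    }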
@@ -155,12 +156,12 @@ type OperatorConfigurationData struct {
 	MaxInstances               int32                        `json:"max_instances,omitempty"`
 	ResyncPeriod               Duration                     `json:"resync_period,omitempty"`
 	RepairPeriod               Duration                     `json:"repair_period,omitempty"`
+	SetMemoryRequestToLimit    bool                         `json:"set_memory_request_to_limit,omitempty"`
 	ShmVolume                  *bool                        `json:"enable_shm_volume,omitempty"`
 	Sidecars                   map[string]string            `json:"sidecar_docker_images,omitempty"`
 	PostgresUsersConfiguration PostgresUsersConfiguration   `json:"users"`
 	Kubernetes                 KubernetesMetaConfiguration  `json:"kubernetes"`
 	PostgresPodResources       PostgresPodResourcesDefaults `json:"postgres_pod_resources"`
-	SetMemoryRequestToLimit    bool                         `json:"set_memory_request_to_limit,omitempty"`
 	Timeouts                   OperatorTimeouts             `json:"timeouts"`
 	LoadBalancer               LoadBalancerConfiguration    `json:"load_balancer"`
 	AWSGCP                     AWSGCPConfiguration          `json:"aws_or_gcp"`
@@ -60,7 +60,7 @@ type PostgresSpec struct {
 	LogicalBackupSchedule string              `json:"logicalBackupSchedule,omitempty"`
 	StandbyCluster        *StandbyDescription `json:"standby"`

-	// deprectaed json tags
+	// deprecated json tags
 	InitContainersOld       []v1.Container `json:"init_containers,omitempty"`
 	PodPriorityClassNameOld string         `json:"pod_priority_class_name,omitempty"`
 }
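The *Old fields keep manifests that still use the deprecated snake_case keys (init_containers, pod_priority_class_name) parseable. A hedged sketch of a fallback helper; the camelCase successor field InitContainers is not shown in the hunk above and is assumed here, and v1 refers to k8s.io/api/core/v1 as in the struct itself:

    // Sketch only: prefer the assumed new field, fall back to the deprecated one.
    func effectiveInitContainers(spec *PostgresSpec) []v1.Container {
    	if len(spec.InitContainers) > 0 {
    		return spec.InitContainers
    	}
    	return spec.InitContainersOld // value read from the deprecated init_containers key
    }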
@@ -24,6 +24,7 @@ func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, con
 func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config {
 	result := &config.Config{}

+	// general config
 	result.EtcdHost = fromCRD.EtcdHost
 	result.DockerImage = fromCRD.DockerImage
 	result.Workers = fromCRD.Workers
@@ -31,12 +32,15 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.MaxInstances = fromCRD.MaxInstances
 	result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
 	result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
+	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
 	result.ShmVolume = fromCRD.ShmVolume
 	result.Sidecars = fromCRD.Sidecars

+	// user config
 	result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername
 	result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername

+	// kubernetes config
 	result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName
 	result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
 	result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
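ResyncPeriod and RepairPeriod are declared as Duration in OperatorConfigurationData and converted with time.Duration(...) above, which only works if Duration has time.Duration as its underlying type. A plausible sketch of such a wrapper, assuming it accepts Go duration strings like "5m" or "30s" in the CR (the actual implementation in the repo may differ; imports assumed: encoding/json, time):

    // Sketch only: a Duration wrapper whose underlying type is time.Duration,
    // so time.Duration(fromCRD.ResyncPeriod) compiles as shown above.
    type Duration time.Duration

    func (d *Duration) UnmarshalJSON(b []byte) error {
    	var s string
    	if err := json.Unmarshal(b, &s); err != nil {
    		return err
    	}
    	v, err := time.ParseDuration(s) // e.g. "5m", "30s", "10m"
    	if err != nil {
    		return err
    	}
    	*d = Duration(v)
    	return nil
    }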
@@ -59,16 +63,16 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
 	result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy
 	result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout

 	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
 	result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey

+	// Postgres Pod resources
 	result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest
 	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
 	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
 	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
-	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit

+	// timeout config
 	result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)
 	result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout)
 	result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout)
@@ -76,6 +80,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval)
 	result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout)

+	// load balancer config
 	result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone
 	result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer
 	result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer
@@ -83,6 +88,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
 	result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat

+	// AWS or GCP config
 	result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket
 	result.AWSRegion = fromCRD.AWSGCP.AWSRegion
 	result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket
@@ -90,20 +96,33 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
 	result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath

+	// logical backup config
+	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule
+	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage
+	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
+
+	// debug config
 	result.DebugLogging = fromCRD.OperatorDebug.DebugLogging
 	result.EnableDBAccess = fromCRD.OperatorDebug.EnableDBAccess

+	// Teams API config
 	result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI
 	result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl
 	result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration
 	result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser
+	result.EnableAdminRoleForUsers = fromCRD.TeamsAPI.EnableAdminRoleForUsers
 	result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole
 	result.PamRoleName = fromCRD.TeamsAPI.PamRoleName
+	result.PamConfiguration = fromCRD.TeamsAPI.PamConfiguration
+	result.ProtectedRoles = fromCRD.TeamsAPI.ProtectedRoles
 	result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams

+	// logging REST API config
 	result.APIPort = fromCRD.LoggingRESTAPI.APIPort
 	result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines
 	result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries

+	// Scalyr config
 	result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey
 	result.ScalyrImage = fromCRD.Scalyr.ScalyrImage
 	result.ScalyrServerURL = fromCRD.Scalyr.ScalyrServerURL
@@ -112,9 +131,5 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit
 	result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit

-	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule
-	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage
-	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
-
 	return result
 }
@@ -85,7 +85,7 @@ type Config struct {

 	WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	EtcdHost         string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS
-	DockerImage      string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p7"`
+	DockerImage      string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p9"`
 	Sidecars         map[string]string `name:"sidecar_docker_images"`
 	// default name `operator` enables backward compatibility with the older ServiceAccountName field
 	PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
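The name: and default: struct tags on Config suggest the file-/env-based configuration path fills defaults via reflection over the tags, independently of the CRD import above. A self-contained sketch of how such tags can be consumed; this is not the operator's actual loader:

    // Sketch only: filling string defaults from `default:` struct tags.
    package main

    import (
    	"fmt"
    	"reflect"
    )

    type Config struct {
    	EtcdHost    string `name:"etcd_host" default:""`
    	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p9"`
    }

    func applyDefaults(cfg interface{}) {
    	v := reflect.ValueOf(cfg).Elem()
    	t := v.Type()
    	for i := 0; i < t.NumField(); i++ {
    		f := v.Field(i)
    		if f.Kind() == reflect.String && f.String() == "" {
    			f.SetString(t.Field(i).Tag.Get("default"))
    		}
    	}
    }

    func main() {
    	var c Config
    	applyDefaults(&c)
    	fmt.Println(c.DockerImage) // prints the new spilo-11:1.5-p9 default
    }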