image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator
  tag: v1.3.0
  pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
#   - name: myRegistryKeySecretName

podAnnotations: {}
podLabels: {}

configTarget: "OperatorConfigurationCRD"
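# Note: the operator can also read its configuration from a plain ConfigMap;
# the value above selects the CRD-based OperatorConfiguration object instead.
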
# general top-level configuration parameters
configGeneral:
  # choose if deployment creates/updates CRDs with OpenAPIV3Validation
  enable_crd_validation: true
  # start any new database pod without limitations on shm memory
  enable_shm_volume: true
  # etcd connection string for Patroni. Empty uses K8s-native DCS.
  etcd_host: ""
  # Spilo docker image
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  # min number of instances in Postgres cluster. -1 = no limit
  min_instances: -1
  # max number of instances in Postgres cluster. -1 = no limit
  max_instances: -1
  # period between consecutive repair requests
  repair_period: 5m
  # period between consecutive sync requests
  resync_period: 30m
  # can prevent certain cases of memory overcommitment
  # set_memory_request_to_limit: false

  # map of sidecar names to docker images
  # sidecar_docker_images:
  #   example: "exampleimage:exampletag"

  # number of routines the operator spawns to process requests concurrently
  workers: 4

# parameters describing Postgres users
configUsers:
  # postgres username used for replication between instances
  replication_username: standby
  # postgres superuser name to be created by initdb
  super_username: postgres

configKubernetes:
  # default DNS domain of K8s cluster where operator is running
  cluster_domain: cluster.local
  # additional labels assigned to the cluster objects
  cluster_labels:
    application: spilo
  # label assigned to Kubernetes objects created by the operator
  cluster_name_label: cluster-name
  # additional annotations to add to every database pod
  # custom_pod_annotations:
  #   keya: valuea
  #   keyb: valueb

  # enables initContainers to run actions before Spilo is started
  enable_init_containers: true
  # toggles pod anti-affinity on the Postgres pods
  enable_pod_antiaffinity: false
  # toggles the PodDisruptionBudget between minAvailable 0 and 1
  enable_pod_disruption_budget: true
  # enables sidecar containers to run alongside Spilo in the same pod
  enable_sidecars: true
  # name of the secret containing infrastructure role names and passwords
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles

  # list of labels that can be inherited from the cluster manifest
  # inherited_labels:
  #   - application
  #   - environment

  # timeout for successful migration of master pods from an unschedulable node
  # master_pod_move_timeout: 20m

  # set of labels that a running and active node should possess to be considered ready
  # node_readiness_label:
  #   status: ready

  # name of the secret containing the OAuth2 token to pass to the teams API
  # oauth_token_secret_name: postgresql-operator

  # defines the template for PDB (Pod Disruption Budget) names
  pdb_name_format: "postgres-{cluster}-pdb"
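  # (with this template a cluster named, for example, "acid-minimal-cluster"
  # gets a PDB called "postgres-acid-minimal-cluster-pdb")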
  # override topology key for pod anti-affinity
  pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  # name of the ConfigMap with environment variables to populate on every pod
  # pod_environment_configmap: ""

  # specify the pod management policy of stateful sets of Postgres clusters
  pod_management_policy: "ordered_ready"
  # label assigned to the Postgres pods (and services/endpoints)
  pod_role_label: spilo-role
  # Postgres pods are terminated forcefully after this timeout
  pod_terminate_grace_period: 5m
  # template for database user secrets generated by the operator
  secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
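  # (for example, a role "zalando" in a cluster "acid-minimal-cluster" would get a
  # secret named "zalando.acid-minimal-cluster.credentials.postgresql.acid.zalan.do";
  # the role and cluster names here are purely illustrative)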
  # group ID with write-access to volumes (required to run Spilo as non-root process)
  # spilo_fsgroup: 103

  # whether the Spilo container should run in privileged mode
  spilo_privileged: false
  # operator watches for postgres objects in the given namespace
  watched_namespace: "*" # listen to all namespaces

# configure resource requests for the Postgres pods
configPostgresPodResources:
  # CPU limits for the postgres containers
  default_cpu_limit: "3"
  # CPU request value for the postgres containers
  default_cpu_request: 100m
  # memory limits for the postgres containers
  default_memory_limit: 1Gi
  # memory request value for the postgres containers
  default_memory_request: 100Mi
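  # (with these defaults a Postgres container effectively gets requests of
  # cpu: 100m / memory: 100Mi and limits of cpu: "3" / memory: 1Gi, unless the
  # cluster manifest specifies its own resources)
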
# timeouts related to some operator actions
configTimeouts:
  # timeout when waiting for the Postgres pods to be deleted
  pod_deletion_wait_timeout: 10m
  # timeout when waiting for pod role and cluster labels
  pod_label_wait_timeout: 10m
  # interval between consecutive attempts waiting for the postgresql CRD to be created
  ready_wait_interval: 3s
  # timeout for the complete postgres CRD creation
  ready_wait_timeout: 30s
  # interval to wait between consecutive attempts to check for some K8s resources
  resource_check_interval: 3s
  # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB)
  resource_check_timeout: 10m

# configure behavior of load balancers
configLoadBalancer:
  # DNS zone for cluster DNS name when load balancer is configured for cluster
  db_hosted_zone: db.example.com
  # annotations to apply to service when load balancing is enabled
  # custom_service_annotations:
  #   keyx: valuez
  #   keya: valuea

  # toggles service type load balancer pointing to the master pod of the cluster
  enable_master_load_balancer: false
  # toggles service type load balancer pointing to the replica pod of the cluster
  enable_replica_load_balancer: false
  # defines the DNS name string template for the master load balancer of the cluster
  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
  # defines the DNS name string template for the replica load balancer of the cluster
  replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
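  # (for an illustrative cluster "acid-minimal-cluster" owned by team "acid" and the
  # db_hosted_zone above, these templates resolve to
  # "acid-minimal-cluster.acid.db.example.com" and
  # "acid-minimal-cluster-repl.acid.db.example.com" respectively)
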
# options to aid debugging of the operator itself
configDebug:
  # toggles verbose debug logs from the operator
  debug_logging: true
  # toggles operator functionality that requires access to the postgres database
  enable_database_access: true

# parameters affecting logging and REST API listener
configLoggingRestApi:
  # port the REST API listener listens on
  api_port: 8080
  # number of entries in the cluster history ring buffer
  cluster_history_entries: 1000
  # number of lines in the ring buffer used to store cluster logs
  ring_log_lines: 100

# configure interaction with non-Kubernetes objects from AWS or GCP
configAwsOrGcp:
  # additional Secret (AWS or GCP credentials) to mount in the pod
  # additional_secret_mount: "some-secret-name"

  # path to mount the above Secret in the filesystem of the container(s)
  # additional_secret_mount_path: "/some/dir"

  # AWS region used to store EBS volumes
  aws_region: eu-central-1

  # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
  # kube_iam_role: ""

  # S3 bucket to use for shipping postgres daily logs
  # log_s3_bucket: ""

  # S3 bucket to use for shipping WAL segments with WAL-E
  # wal_s3_bucket: ""

# configure K8s cron job managed by the operator
configLogicalBackup:
  # image for pods of the logical backup job (example runs pg_dumpall)
  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
  # S3 Access Key ID
  logical_backup_s3_access_key_id: ""
  # S3 bucket to store backup results
  logical_backup_s3_bucket: "my-bucket-url"
  # S3 endpoint URL when not using AWS
  logical_backup_s3_endpoint: ""
  # S3 Secret Access Key
  logical_backup_s3_secret_access_key: ""
  # S3 server-side encryption
  logical_backup_s3_sse: "AES256"
  # backup schedule in the cron format
  logical_backup_schedule: "30 00 * * *"
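  # (the default "30 00 * * *" runs the backup job once a day at 00:30)
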
# automate creation of human users with teams API service
configTeamsApi:
  # team_admin_role will have the rights to grant roles coming from PG manifests
  # enable_admin_role_for_users: true

  # toggle to grant superuser to team members created from the Teams API
  enable_team_superuser: false
  # toggles usage of the Teams API by the operator
  enable_teams_api: false
  # should contain a URL to use for authentication (username and token)
  # pam_configuration: ""

  # operator will add all team member roles to this group and add a pg_hba line
  pam_role_name: zalandos
  # list of teams whose members need the superuser role in each Postgres cluster
  # postgres_superuser_teams:
  #   - postgres_superusers

  # list of roles that cannot be overwritten by an application, team or infrastructure role
  protected_role_names:
    - admin
  # role name to grant to team members created from the Teams API
  team_admin_role: admin
  # postgres config parameters to apply to each team member role
  team_api_role_configuration:
    log_statement: all
  # URL of the Teams API service
  # teams_api_url: http://fake-teams-api.default.svc.cluster.local

# Scalyr is a log management tool that Zalando uses as a sidecar
configScalyr:
  # API key for the Scalyr sidecar
  # scalyr_api_key: ""

  # Docker image for the Scalyr sidecar
  # scalyr_image: ""

  # CPU limit value for the Scalyr sidecar
  scalyr_cpu_limit: "1"
  # CPU request value for the Scalyr sidecar
  scalyr_cpu_request: 100m
  # Memory limit value for the Scalyr sidecar
  scalyr_memory_limit: 1Gi
  # Memory request value for the Scalyr sidecar
  scalyr_memory_request: 50Mi

rbac:
  # Specifies whether RBAC resources should be created
  create: true

crd:
  # Specifies whether custom resource definitions should be created
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

priorityClassName: ""

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 300Mi
  # requests:
  #   cpu: 100m
  #   memory: 300Mi

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
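# A hypothetical example, in case the operator should tolerate a dedicated-node taint:
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "postgres-operator"
#     effect: "NoSchedule"
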
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
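
# Minimal install sketch (assumes Helm 3 and a local checkout of the operator chart;
# the release name and chart path below are only placeholders):
#   helm install postgres-operator ./charts/postgres-operator -f values.yaml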