align config map, operator config, helm chart values and templates (#595)
* align config map, operator config, helm chart values and templates
* follow helm chart conventions also in CRD templates
* split up values files and add comments
* avoid yaml confusion in postgres manifests
* bump spilo version and use example for logical_backup_s3_bucket
* add ConfigTarget switch to values
This commit is contained in:
parent 3a914f9a3c
commit 7c19cf50db
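The new configTarget switch decides whether the chart renders its configuration as a ConfigMap or as an OperatorConfiguration CRD object. A minimal values override illustrating the switch, sketched from the chart values introduced further down in this diff (key names follow those values; treat it as an example, not the authoritative file):

# hypothetical my-values.yaml passed via `-f` at install/upgrade time
configTarget: "OperatorConfigurationCRD"   # or "ConfigMap"

# grouped sections are rendered as-is into the chosen target
configGeneral:
  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
  workers: 4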
				|  | @ -1,3 +1,4 @@ | |||
| {{- if eq .Values.configTarget "ConfigMap" }} | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|  | @ -13,21 +14,14 @@ data: | |||
|                             {{- else }} | ||||
|                                 {{ .Values.serviceAccount.name }} | ||||
|                             {{- end }} | ||||
|   api_port: "{{ .Values.configLoggingRestApi.api_port }}" | ||||
|   cluster_history_entries: "{{ .Values.configLoggingRestApi.cluster_history_entries }}" | ||||
|   docker_image: {{ .Values.docker_image }} | ||||
|   debug_logging: "{{ .Values.configDebug.debug_logging }}" | ||||
|   enable_database_access: "{{ .Values.configDebug.enable_database_access }}" | ||||
|   enable_shm_volume: "{{ .Values.enable_shm_volume }}" | ||||
|   repair_period: {{ .Values.repair_period }} | ||||
|   resync_period: {{ .Values.resync_period }} | ||||
|   ring_log_lines: "{{ .Values.configLoggingRestApi.ring_log_lines }}" | ||||
|   spilo_privileged: "{{ .Values.spilo_privileged }}" | ||||
|   workers: "{{ .Values.workers }}" | ||||
| {{ toYaml .Values.configMap | indent 2 }} | ||||
| {{ toYaml .Values.configGeneral | indent 2 }} | ||||
| {{ toYaml .Values.configUsers | indent 2 }} | ||||
| {{ toYaml .Values.configKubernetes | indent 2 }} | ||||
| {{ toYaml .Values.configTimeouts | indent 2 }} | ||||
| {{ toYaml .Values.configLoadBalancer | indent 2 }} | ||||
| {{ toYaml .Values.configAwsOrGcp | indent 2 }} | ||||
| {{ toYaml .Values.configLogicalBackup | indent 2 }} | ||||
| {{ toYaml .Values.configDebug | indent 2 }} | ||||
| {{ toYaml .Values.configLoggingRestApi | indent 2 }} | ||||
| {{ toYaml .Values.configTeamsApi | indent 2 }} | ||||
| {{- end }} | ||||
|  |  | |||
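For the ConfigMap target, each grouped values section is emitted under data: via toYaml | indent 2, which is why the ConfigMap-oriented values file keeps every leaf as a string (ConfigMap data accepts only strings). A rough sketch of the rendered output, assuming the grouped values shown later in this diff:

apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator        # the real template uses the chart fullname helper
data:
  # from .Values.configGeneral (ConfigMap variant keeps quoted strings)
  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
  workers: "4"
  # from .Values.configDebug
  debug_logging: "true"
  enable_database_access: "true"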
|  | @ -2,6 +2,11 @@ apiVersion: apiextensions.k8s.io/v1beta1 | |||
| kind: CustomResourceDefinition | ||||
| metadata: | ||||
|   name: postgresqls.acid.zalan.do | ||||
|   labels: | ||||
|     app.kubernetes.io/name: {{ template "postgres-operator.name" . }} | ||||
|     helm.sh/chart: {{ template "postgres-operator.chart" . }} | ||||
|     app.kubernetes.io/managed-by: {{ .Release.Service }} | ||||
|     app.kubernetes.io/instance: {{ .Release.Name }} | ||||
|   annotations: | ||||
|     "helm.sh/hook": crd-install | ||||
| spec: | ||||
|  | @ -22,6 +27,11 @@ apiVersion: apiextensions.k8s.io/v1beta1 | |||
| kind: CustomResourceDefinition | ||||
| metadata: | ||||
|   name: operatorconfigurations.acid.zalan.do | ||||
|   labels: | ||||
|     app.kubernetes.io/name: {{ template "postgres-operator.name" . }} | ||||
|     helm.sh/chart: {{ template "postgres-operator.chart" . }} | ||||
|     app.kubernetes.io/managed-by: {{ .Release.Service }} | ||||
|     app.kubernetes.io/instance: {{ .Release.Name }} | ||||
|   annotations: | ||||
|     "helm.sh/hook": crd-install | ||||
| spec: | ||||
|  |  | |||
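The added labels rely on named templates such as postgres-operator.name and postgres-operator.chart. Those helper definitions are not part of this diff; they would typically live in _helpers.tpl and look roughly like the following (an assumed sketch following standard `helm create` conventions, including a hypothetical nameOverride value):

{{/* _helpers.tpl (assumed, not shown in this commit) */}}
{{- define "postgres-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "postgres-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}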
|  | @ -1,4 +1,4 @@ | |||
| apiVersion: apps/v1beta2 | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   labels: | ||||
|  | @ -16,9 +16,11 @@ spec: | |||
|   template: | ||||
|     metadata: | ||||
|       annotations: | ||||
|       {{- if eq .Values.configTarget "ConfigMap" }} | ||||
|         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} | ||||
|         # In order to use the checksum of CRD OperatorConfiguration instead, use the following line instead | ||||
|         # {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }} | ||||
|       {{- else }} | ||||
|         checksum/config: {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }} | ||||
|       {{- end }} | ||||
|     {{- if .Values.podAnnotations }} | ||||
| {{ toYaml .Values.podAnnotations | indent 8 }} | ||||
|     {{- end }} | ||||
|  | @ -39,11 +41,13 @@ spec: | |||
|         image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" | ||||
|         imagePullPolicy: {{ .Values.image.pullPolicy }} | ||||
|         env: | ||||
|       {{- if eq .Values.configTarget "ConfigMap" }} | ||||
|         - name: CONFIG_MAP_NAME | ||||
|           value: {{ template "postgres-operator.fullname" . }} | ||||
|         # In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above | ||||
|         # - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT | ||||
|         #  value: {{ template "postgres-operator.fullname" . }} | ||||
|       {{- else }} | ||||
|         - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT | ||||
|           value: {{ template "postgres-operator.fullname" . }} | ||||
|       {{- end }} | ||||
|         resources: | ||||
| {{ toYaml .Values.resources | indent 10 }} | ||||
|       {{- if .Values.imagePullSecrets }} | ||||
|  |  | |||
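Depending on configTarget, the deployment now points the operator at its configuration through one of two environment variables and keys the pod checksum annotation to the matching template. A sketch of the rendered pod template for the CRD target (container name and checksum value are placeholders):

# pod template excerpt when configTarget is "OperatorConfigurationCRD"
template:
  metadata:
    annotations:
      checksum/config: "<sha256 of the rendered operatorconfiguration.yaml>"
  spec:
    containers:
    - name: postgres-operator                          # assumed container name
      env:
      - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT   # CONFIG_MAP_NAME for the ConfigMap target
        value: postgres-operator                       # chart fullname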
|  | @ -1,3 +1,4 @@ | |||
| {{- if eq .Values.configTarget "OperatorConfigurationCRD" }} | ||||
| apiVersion: "acid.zalan.do/v1" | ||||
| kind: OperatorConfiguration | ||||
| metadata: | ||||
|  | @ -8,26 +9,19 @@ metadata: | |||
|     app.kubernetes.io/managed-by: {{ .Release.Service }} | ||||
|     app.kubernetes.io/instance: {{ .Release.Name }} | ||||
| configuration: | ||||
|   docker_image: {{ .Values.docker_image }} | ||||
|   enable_shm_volume: {{ .Values.enable_shm_volume }} | ||||
|   repair_period: {{ .Values.repair_period }} | ||||
|   resync_period: {{ .Values.resync_period }} | ||||
|   workers: {{ .Values.workers }} | ||||
| {{ toYaml .Values.configCRD | indent 2 }} | ||||
| {{ toYaml .Values.configGeneral | indent 2 }} | ||||
|   users: | ||||
| {{ toYaml .Values.configUsers | indent 4 }} | ||||
|   kubernetes: | ||||
|     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }} | ||||
|     pod_service_account_name: operator | ||||
|     spilo_privileged: {{ .Values.spilo_privileged }} | ||||
| {{ toYaml .Values.configKubernetes | indent 4 }} | ||||
| {{ toYaml .Values.configKubernetesCRD | indent 4 }} | ||||
|   postgres_pod_resources: | ||||
| {{ toYaml .Values.configPostgresPodResources | indent 4 }} | ||||
|   timeouts: | ||||
| {{ toYaml .Values.configTimeouts | indent 4 }} | ||||
|   load_balancer: | ||||
| {{ toYaml .Values.configLoadBalancerCRD | indent 4 }} | ||||
| {{ toYaml .Values.configLoadBalancer | indent 4 }} | ||||
|   aws_or_gcp: | ||||
| {{ toYaml .Values.configAwsOrGcp | indent 4 }} | ||||
|   logical_backup: | ||||
|  | @ -35,8 +29,9 @@ configuration: | |||
|   debug: | ||||
| {{ toYaml .Values.configDebug | indent 4 }} | ||||
|   teams_api: | ||||
| {{ toYaml .Values.configTeamsApiCRD | indent 4 }} | ||||
| {{ toYaml .Values.configTeamsApi | indent 4 }} | ||||
|   logging_rest_api: | ||||
| {{ toYaml .Values.configLoggingRestApi | indent 4 }} | ||||
|   scalyr: | ||||
| {{ toYaml .Values.configScalyr | indent 4 }} | ||||
| {{- end }} | ||||
|  |  | |||
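With the former *CRD-only sections folded into the shared ones, the OperatorConfiguration template nests each group under its key in configuration:. A trimmed sketch of the rendered object, assuming the values-crd defaults introduced below:

apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgres-operator        # the real template uses the chart fullname helper
configuration:
  docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
  workers: 4
  users:
    replication_username: standby
    super_username: postgres
  kubernetes:
    pod_service_account_name: operator
    spilo_privileged: false
  load_balancer:
    enable_master_load_balancer: true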
|  | @ -0,0 +1,270 @@ | |||
| image: | ||||
|   registry: registry.opensource.zalan.do | ||||
|   repository: acid/postgres-operator | ||||
|   tag: v1.1.0-54-g3a914f9 | ||||
|   pullPolicy: "IfNotPresent" | ||||
| 
 | ||||
| # Optionally specify an array of imagePullSecrets. | ||||
| # Secrets must be manually created in the namespace. | ||||
| # ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | ||||
| # imagePullSecrets: | ||||
|   # - name: myRegistryKeySecretName | ||||
| 
 | ||||
| podAnnotations: {} | ||||
| podLabels: {} | ||||
| 
 | ||||
| configTarget: "OperatorConfigurationCRD" | ||||
| 
 | ||||
| # general top-level configuration parameters | ||||
| configGeneral: | ||||
|   # start any new database pod without limitations on shm memory | ||||
|   enable_shm_volume: true | ||||
|   # etcd connection string for Patroni. Empty uses K8s-native DCS. | ||||
|   etcd_host: "" | ||||
|   # Spilo docker image | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9 | ||||
|   # min number of instances in Postgres cluster. -1 = no limit | ||||
|   min_instances: -1 | ||||
|   # max number of instances in Postgres cluster. -1 = no limit | ||||
|   max_instances: -1 | ||||
|   # period between consecutive repair requests | ||||
|   repair_period: 5m | ||||
|   # period between consecutive sync requests | ||||
|   resync_period: 30m | ||||
|   # can prevent certain cases of memory overcommitment | ||||
|   # set_memory_request_to_limit: false | ||||
| 
 | ||||
|   # map of sidecar names to docker images | ||||
|   # sidecar_docker_images | ||||
|   #  example: "exampleimage:exampletag" | ||||
| 
 | ||||
|   # number of routines the operator spawns to process requests concurrently | ||||
|   workers: 4 | ||||
| 
 | ||||
| # parameters describing Postgres users | ||||
| configUsers: | ||||
|   # postgres username used for replication between instances | ||||
|   replication_username: standby | ||||
|   # postgres superuser name to be created by initdb | ||||
|   super_username: postgres | ||||
| 
 | ||||
| configKubernetes: | ||||
|   # default DNS domain of K8s cluster where operator is running | ||||
|   cluster_domain: cluster.local | ||||
|   # additional labels assigned to the cluster objects | ||||
|   cluster_labels: | ||||
|       application: spilo | ||||
|   # label assigned to Kubernetes objects created by the operator | ||||
|   cluster_name_label: cluster-name | ||||
|   # toggles pod anti affinity on the Postgres pods | ||||
|   enable_pod_antiaffinity: false | ||||
|   # toggles PDB to set MinAvailable to 0 or 1 | ||||
|   enable_pod_disruption_budget: true | ||||
|   # name of the secret containing infrastructure roles names and passwords | ||||
|   # infrastructure_roles_secret_name: postgresql-infrastructure-roles | ||||
| 
 | ||||
|   # list of labels that can be inherited from the cluster manifest | ||||
|   # inherited_labels: | ||||
|   # - application | ||||
|   # - app | ||||
| 
 | ||||
|   # timeout for successful migration of master pods from unschedulable node | ||||
|   # master_pod_move_timeout: 20m | ||||
| 
 | ||||
|   # set of labels that a running and active node should possess to be considered ready | ||||
|   # node_readiness_label: "" | ||||
| 
 | ||||
|   # name of the secret containing the OAuth2 token to pass to the teams API | ||||
|   # oauth_token_secret_name: postgresql-operator | ||||
| 
 | ||||
|   # defines the template for PDB (Pod Disruption Budget) names | ||||
|   pdb_name_format: "postgres-{cluster}-pdb" | ||||
|   # override topology key for pod anti affinity | ||||
|   pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|   # name of the ConfigMap with environment variables to populate on every pod | ||||
|   # pod_environment_configmap: "" | ||||
| 
 | ||||
|   # specify the pod management policy of stateful sets of Postgres clusters | ||||
|   pod_management_policy: "ordered_ready" | ||||
|   # label assigned to the Postgres pods (and services/endpoints) | ||||
|   pod_role_label: spilo-role | ||||
|   # Postgres pods are terminated forcefully after this timeout | ||||
|   pod_terminate_grace_period: 5m | ||||
|   # template for database user secrets generated by the operator | ||||
|   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" | ||||
|   # group ID with write-access to volumes (required to run Spilo as non-root process) | ||||
|   # spilo_fsgroup: 103 | ||||
| 
 | ||||
|   # whether the Spilo container should run in privileged mode | ||||
|   spilo_privileged: false | ||||
|   # operator watches for postgres objects in the given namespace | ||||
|   watched_namespace: "*" # listen to all namespaces | ||||
| 
 | ||||
| # configure resource requests for the Postgres pods | ||||
| configPostgresPodResources: | ||||
|   # CPU limits for the postgres containers | ||||
|   default_cpu_limit: "3" | ||||
|   # cpu request value for the postgres containers | ||||
|   default_cpu_request: 100m | ||||
|   # memory limits for the postgres containers | ||||
|   default_memory_limit: 1Gi | ||||
|   # memory request value for the postgres containers | ||||
|   default_memory_request: 100Mi | ||||
| 
 | ||||
| # timeouts related to some operator actions | ||||
| configTimeouts: | ||||
|   # timeout when waiting for the Postgres pods to be deleted | ||||
|   pod_deletion_wait_timeout: 10m | ||||
|   # timeout when waiting for pod role and cluster labels | ||||
|   pod_label_wait_timeout: 10m | ||||
|   # interval between consecutive attempts waiting for postgresql CRD to be created | ||||
|   ready_wait_interval: 3s | ||||
|   # timeout for the complete postgres CRD creation | ||||
|   ready_wait_timeout: 30s | ||||
|   # interval to wait between consecutive attempts to check for some K8s resources | ||||
|   resource_check_interval: 3s | ||||
|   # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB) | ||||
|   resource_check_timeout: 10m | ||||
| 
 | ||||
| # configure behavior of load balancers | ||||
| configLoadBalancer: | ||||
|   # DNS zone for cluster DNS name when load balancer is configured for cluster | ||||
|   db_hosted_zone: db.example.com | ||||
|   # annotations to apply to service when load balancing is enabled | ||||
|   # custom_service_annotations: | ||||
|   #   keyx: valuez | ||||
|   #   keya: valuea | ||||
| 
 | ||||
|   # toggles service type load balancer pointing to the master pod of the cluster | ||||
|   enable_master_load_balancer: true | ||||
|   # toggles service type load balancer pointing to the replica pod of the cluster | ||||
|   enable_replica_load_balancer: false | ||||
|   # defines the DNS name string template for the master load balancer cluster | ||||
|   master_dns_name_format: "{cluster}.{team}.{hostedzone}" | ||||
|   # defines the DNS name string template for the replica load balancer cluster | ||||
|   replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" | ||||
| 
 | ||||
| # options to aid debugging of the operator itself | ||||
| configDebug: | ||||
|   # toggles verbose debug logs from the operator | ||||
|   debug_logging: true | ||||
|   # toggles operator functionality that requires access to the postgres database | ||||
|   enable_database_access: true | ||||
| 
 | ||||
| # parameters affecting logging and REST API listener | ||||
| configLoggingRestApi: | ||||
|   # REST API listener listens to this port | ||||
|   api_port: 8080 | ||||
|   # number of entries in the cluster history ring buffer | ||||
|   cluster_history_entries: 1000 | ||||
|   # number of lines in the ring buffer used to store cluster logs | ||||
|   ring_log_lines: 100 | ||||
| 
 | ||||
| # configure interaction with non-Kubernetes objects from AWS or GCP | ||||
| configAwsOrGcp: | ||||
|   # Additional Secret (aws or gcp credentials) to mount in the pod | ||||
|   # additional_secret_mount: "some-secret-name" | ||||
| 
 | ||||
|   # Path to mount the above Secret in the filesystem of the container(s) | ||||
|   # additional_secret_mount_path: "/some/dir" | ||||
| 
 | ||||
|   # AWS region used to store EBS volumes | ||||
|   aws_region: eu-central-1 | ||||
| 
 | ||||
|   # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods | ||||
|   # kube_iam_role: "" | ||||
| 
 | ||||
|   # S3 bucket to use for shipping postgres daily logs | ||||
|   # log_s3_bucket: "" | ||||
| 
 | ||||
|   # S3 bucket to use for shipping WAL segments with WAL-E | ||||
|   # wal_s3_bucket: "" | ||||
| 
 | ||||
| # configure K8s cron job managed by the operator | ||||
| configLogicalBackup: | ||||
|   # backup schedule in the cron format | ||||
|   logical_backup_schedule: "30 00 * * *" | ||||
|   # image for pods of the logical backup job (example runs pg_dumpall) | ||||
|   logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|   # S3 bucket to store backup results | ||||
|   logical_backup_s3_bucket: "my-bucket-url" | ||||
| 
 | ||||
| # automate creation of human users with teams API service | ||||
| configTeamsApi: | ||||
|   # team_admin_role will have the rights to grant roles coming from PG manifests | ||||
|   # enable_admin_role_for_users: true | ||||
| 
 | ||||
|   # toggle to grant superuser to team members created from the Teams API | ||||
|   enable_team_superuser: false | ||||
|   # toggles usage of the Teams API by the operator | ||||
|   enable_teams_api: false | ||||
|   # should contain a URL to use for authentication (username and token) | ||||
|   # pam_configuration: "" | ||||
| 
 | ||||
|   # operator will add all team member roles to this group and add a pg_hba line | ||||
|   pam_role_name: zalandos | ||||
|   # List of teams which members need the superuser role in each Postgres cluster | ||||
|   # postgres_superuser_teams: "postgres_superusers" | ||||
| 
 | ||||
|   # List of roles that cannot be overwritten by an application, team or infrastructure role | ||||
|   protected_role_names: | ||||
|   - admin | ||||
|   # role name to grant to team members created from the Teams API | ||||
|   team_admin_role: admin | ||||
|   # postgres config parameters to apply to each team member role | ||||
|   team_api_role_configuration: | ||||
|     log_statement: all | ||||
|   # URL of the Teams API service | ||||
|   # teams_api_url: http://fake-teams-api.default.svc.cluster.local | ||||
| 
 | ||||
| # Scalyr is a log management tool that Zalando uses as a sidecar | ||||
| scalyr: | ||||
|   # API key for the Scalyr sidecar | ||||
|   # scalyr_api_key: "" | ||||
| 
 | ||||
|   # Docker image for the Scalyr sidecar | ||||
|   # scalyr_image: "" | ||||
| 
 | ||||
|   # CPU limit value for the Scalyr sidecar | ||||
|   scalyr_cpu_limit: "1" | ||||
|   # CPU request value for the Scalyr sidecar | ||||
|   scalyr_cpu_request: 100m | ||||
|   # Memory limit value for the Scalyr sidecar | ||||
|   scalyr_memory_limit: 1Gi | ||||
|   # Memory request value for the Scalyr sidecar | ||||
|   scalyr_memory_request: 50Mi | ||||
| 
 | ||||
| rbac: | ||||
|   # Specifies whether RBAC resources should be created | ||||
|   create: true | ||||
| 
 | ||||
| serviceAccount: | ||||
|   # Specifies whether a ServiceAccount should be created | ||||
|   create: true | ||||
|   # The name of the ServiceAccount to use. | ||||
|   # If not set and create is true, a name is generated using the fullname template | ||||
|   # When relying solely on the OperatorConfiguration CRD, set this value to "operator" | ||||
|   # Otherwise, the operator tries to use the "default" service account which is forbidden | ||||
|   name: "operator" | ||||
| 
 | ||||
| priorityClassName: "" | ||||
| 
 | ||||
| resources: {} | ||||
|   # limits: | ||||
|   #   cpu: 100m | ||||
|   #   memory: 300Mi | ||||
|   # requests: | ||||
|   #   cpu: 100m | ||||
|   #   memory: 300Mi | ||||
| 
 | ||||
| # Affinity for pod assignment | ||||
| # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | ||||
| affinity: {} | ||||
| 
 | ||||
| # Tolerations for pod assignment | ||||
| # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ||||
| tolerations: [] | ||||
| 
 | ||||
| # Node labels for pod assignment | ||||
| # Ref: https://kubernetes.io/docs/user-guide/node-selection/ | ||||
| nodeSelector: {} | ||||
|  | @ -1,7 +1,7 @@ | |||
| image: | ||||
|   registry: registry.opensource.zalan.do | ||||
|   repository: acid/postgres-operator | ||||
|   tag: v1.1.0-28-g24d412a | ||||
|   tag: v1.1.0-54-g3a914f9 | ||||
|   pullPolicy: "IfNotPresent" | ||||
| 
 | ||||
| # Optionally specify an array of imagePullSecrets. | ||||
|  | @ -13,148 +13,208 @@ image: | |||
| podAnnotations: {} | ||||
| podLabels: {} | ||||
| 
 | ||||
| # config shared from ConfigMap and CRD | ||||
| docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7 | ||||
| enable_shm_volume: true | ||||
| repair_period: 5m | ||||
| resync_period: 5m | ||||
| spilo_privileged: false | ||||
| workers: 4 | ||||
| configTarget: "ConfigMap" | ||||
| 
 | ||||
| # general configuration parameters | ||||
| configGeneral: | ||||
|   # start any new database pod without limitations on shm memory | ||||
|   enable_shm_volume: "true" | ||||
|   # etcd connection string for Patroni. Empty uses K8s-native DCS. | ||||
|   etcd_host: "" | ||||
|   # Spilo docker image | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9 | ||||
|   # min number of instances in Postgres cluster. -1 = no limit | ||||
|   min_instances: "-1" | ||||
|   # max number of instances in Postgres cluster. -1 = no limit | ||||
|   max_instances: "-1" | ||||
|   # period between consecutive repair requests | ||||
|   repair_period: 5m | ||||
|   # period between consecutive sync requests | ||||
|   resync_period: 30m | ||||
|   # can prevent certain cases of memory overcommitment | ||||
|   # set_memory_request_to_limit: "false" | ||||
| 
 | ||||
|   # map of sidecar names to docker images | ||||
|   # sidecar_docker_images: "" | ||||
| 
 | ||||
|   # number of routines the operator spawns to process requests concurrently | ||||
|   workers: "4" | ||||
| 
 | ||||
| # parameters describing Postgres users | ||||
| configUsers: | ||||
|   # postgres username used for replication between instances | ||||
|   replication_username: standby | ||||
|   # postgres superuser name to be created by initdb | ||||
|   super_username: postgres | ||||
| 
 | ||||
| configKubernetes: | ||||
|   # default DNS domain of K8s cluster where operator is running | ||||
|   cluster_domain: cluster.local | ||||
|   # inherited_labels: "" | ||||
|   # infrastructure_roles_secret_name: postgresql-infrastructure-roles | ||||
|   # node_readiness_label: "" | ||||
|   # oauth_token_secret_name: postgresql-operator | ||||
|   # pod_environment_configmap: "" | ||||
|   # spilo_fsgroup: "103" | ||||
|   pod_management_policy: "ordered_ready" | ||||
|   pdb_name_format: "postgres-{cluster}-pdb" | ||||
|   pod_role_label: spilo-role | ||||
|   pod_terminate_grace_period: 5m | ||||
|   secret_name_template: '{username}.{cluster}.credentials' | ||||
| 
 | ||||
| configPostgresPodResources: | ||||
|   default_cpu_request: 100m | ||||
|   default_memory_request: 100Mi | ||||
|   default_cpu_limit: "3" | ||||
|   default_memory_limit: 1Gi | ||||
|   # set_memory_request_to_limit: true | ||||
| 
 | ||||
| configTimeouts: | ||||
|   # master_pod_move_timeout: 10m | ||||
|   pod_deletion_wait_timeout: 10m | ||||
|   pod_label_wait_timeout: 10m | ||||
|   ready_wait_interval: 3s | ||||
|   ready_wait_timeout: 30s | ||||
|   resource_check_interval: 3s | ||||
|   resource_check_timeout: 10m | ||||
| 
 | ||||
| configDebug: | ||||
|   debug_logging: true | ||||
|   enable_database_access: true | ||||
| 
 | ||||
| configLoggingRestApi: | ||||
|   api_port: 8080 | ||||
|   cluster_history_entries: 1000 | ||||
|   ring_log_lines: 100 | ||||
| 
 | ||||
| configAwsOrGcp: | ||||
|   aws_region: eu-central-1 | ||||
|   db_hosted_zone: db.example.com | ||||
|   # kube_iam_role: "" | ||||
|   # log_s3_bucket: "" | ||||
|   # wal_s3_bucket: "" | ||||
|   # additional_secret_mount: "some-secret-name" | ||||
|   # additional_secret_mount_path: "/some/dir" | ||||
| 
 | ||||
| configLogicalBackup: | ||||
|   logical_backup_schedule: "30 00 * * *" | ||||
|   logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|   logical_backup_s3_bucket: "" | ||||
| 
 | ||||
| # config exclusive to ConfigMap | ||||
| configMap: | ||||
|   # additional labels assigned to the cluster objects | ||||
|   cluster_labels: application:spilo | ||||
|   # label assigned to Kubernetes objects created by the operator | ||||
|   cluster_name_label: version | ||||
|   # toggles pod anti affinity on the Postgres pods | ||||
|   enable_pod_antiaffinity: "false" | ||||
|   # toggles PDB to set MinAvailable to 0 or 1 | ||||
|   enable_pod_disruption_budget: "true" | ||||
|   # name of the secret containing infrastructure roles names and passwords | ||||
|   # infrastructure_roles_secret_name: postgresql-infrastructure-roles | ||||
| 
 | ||||
|   # list of labels that can be inherited from the cluster manifest | ||||
|   # inherited_labels: "" | ||||
| 
 | ||||
|   # timeout for successful migration of master pods from unschedulable node | ||||
|   # master_pod_move_timeout: 20m | ||||
| 
 | ||||
|   # set of labels that a running and active node should possess to be considered ready | ||||
|   # node_readiness_label: "" | ||||
| 
 | ||||
|   # name of the secret containing the OAuth2 token to pass to the teams API | ||||
|   # oauth_token_secret_name: postgresql-operator | ||||
| 
 | ||||
|   # defines the template for PDB (Pod Disruption Budget) names | ||||
|   pdb_name_format: "postgres-{cluster}-pdb" | ||||
|   # override topology key for pod anti affinity | ||||
|   pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|   # name of the ConfigMap with environment variables to populate on every pod | ||||
|   # pod_environment_configmap: "" | ||||
| 
 | ||||
|   # specify the pod management policy of stateful sets of Postgres clusters | ||||
|   pod_management_policy: "ordered_ready" | ||||
|   # label assigned to the Postgres pods (and services/endpoints) | ||||
|   pod_role_label: spilo-role | ||||
|   # Postgres pods are terminated forcefully after this timeout | ||||
|   pod_terminate_grace_period: 5m | ||||
|   # template for database user secrets generated by the operator | ||||
|   secret_name_template: '{username}.{cluster}.credentials' | ||||
|   # group ID with write-access to volumes (required to run Spilo as non-root process) | ||||
|   # spilo_fsgroup: "103" | ||||
| 
 | ||||
|   # whether the Spilo container should run in privileged mode | ||||
|   spilo_privileged: "false" | ||||
|   # operator watches for postgres objects in the given namespace | ||||
|   watched_namespace: "*" # listen to all namespaces | ||||
| 
 | ||||
| # configure resource requests for the Postgres pods | ||||
| configPostgresPodResources: | ||||
|   # CPU limits for the postgres containers | ||||
|   default_cpu_limit: "3" | ||||
|   # cpu request value for the postgres containers | ||||
|   default_cpu_request: 100m | ||||
|   # memory limits for the postgres containers | ||||
|   default_memory_limit: 1Gi | ||||
|   # memory request value for the postgres containers | ||||
|   default_memory_request: 100Mi | ||||
| 
 | ||||
| # timeouts related to some operator actions | ||||
| configTimeouts: | ||||
|   # timeout when waiting for the Postgres pods to be deleted | ||||
|   pod_deletion_wait_timeout: 10m | ||||
|   # timeout when waiting for pod role and cluster labels | ||||
|   pod_label_wait_timeout: 10m | ||||
|   # interval between consecutive attempts waiting for postgresql CRD to be created | ||||
|   ready_wait_interval: 3s | ||||
|   # timeout for the complete postgres CRD creation | ||||
|   ready_wait_timeout: 30s | ||||
|   # interval to wait between consecutive attempts to check for some K8s resources | ||||
|   resource_check_interval: 3s | ||||
|   # timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB) | ||||
|   resource_check_timeout: 10m | ||||
| 
 | ||||
| # configure behavior of load balancers | ||||
| configLoadBalancer: | ||||
|   # DNS zone for cluster DNS name when load balancer is configured for cluster | ||||
|   db_hosted_zone: db.example.com | ||||
|   # annotations to apply to service when load balancing is enabled | ||||
|   # custom_service_annotations: | ||||
|   #   "keyx:valuez,keya:valuea" | ||||
| 
 | ||||
|   # toggles service type load balancer pointing to the master pod of the cluster | ||||
|   enable_master_load_balancer: "true" | ||||
|   # toggles service type load balancer pointing to the replica pod of the cluster | ||||
|   enable_replica_load_balancer: "false" | ||||
|   # defines the DNS name string template for the master load balancer cluster | ||||
|   master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' | ||||
|   # defines the DNS name string template for the replica load balancer cluster | ||||
|   replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}' | ||||
| 
 | ||||
| # options to aid debugging of the operator itself | ||||
| configDebug: | ||||
|   # toggles verbose debug logs from the operator | ||||
|   debug_logging: "true" | ||||
|   # toggles operator functionality that requires access to the postgres database | ||||
|   enable_database_access: "true" | ||||
| 
 | ||||
| # parameters affecting logging and REST API listener | ||||
| configLoggingRestApi: | ||||
|   # REST API listener listens to this port | ||||
|   api_port: "8080" | ||||
|   # number of entries in the cluster history ring buffer | ||||
|   cluster_history_entries: "1000" | ||||
|   # number of lines in the ring buffer used to store cluster logs | ||||
|   ring_log_lines: "100" | ||||
| 
 | ||||
| # configure interaction with non-Kubernetes objects from AWS or GCP | ||||
| configAwsOrGcp: | ||||
|   # Additional Secret (aws or gcp credentials) to mount in the pod | ||||
|   # additional_secret_mount: "some-secret-name" | ||||
| 
 | ||||
|   # Path to mount the above Secret in the filesystem of the container(s) | ||||
|   # additional_secret_mount_path: "/some/dir" | ||||
| 
 | ||||
|   # AWS region used to store EBS volumes | ||||
|   aws_region: eu-central-1 | ||||
| 
 | ||||
|   # AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods | ||||
|   # kube_iam_role: "" | ||||
| 
 | ||||
|   # S3 bucket to use for shipping postgres daily logs | ||||
|   # log_s3_bucket: "" | ||||
| 
 | ||||
|   # S3 bucket to use for shipping WAL segments with WAL-E | ||||
|   # wal_s3_bucket: "" | ||||
| 
 | ||||
| # configure K8s cron job managed by the operator | ||||
| configLogicalBackup: | ||||
|   # backup schedule in the cron format | ||||
|   logical_backup_schedule: "30 00 * * *" | ||||
|   # image for pods of the logical backup job (example runs pg_dumpall) | ||||
|   logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|   # S3 bucket to store backup results | ||||
|   logical_backup_s3_bucket: "my-bucket-url" | ||||
| 
 | ||||
| # automate creation of human users with teams API service | ||||
| configTeamsApi: | ||||
|   enable_teams_api: "false" | ||||
|   # team_admin_role will have the rights to grant roles coming from PG manifests | ||||
|   # enable_admin_role_for_users: "true" | ||||
| 
 | ||||
|   # toggle to grant superuser to team members created from the Teams API | ||||
|   # enable_team_superuser: "false" | ||||
| 
 | ||||
|   # toggles usage of the Teams API by the operator | ||||
|   enable_teams_api: "false" | ||||
|   # should contain a URL to use for authentication (username and token) | ||||
|   # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees | ||||
| 
 | ||||
|   # operator will add all team member roles to this group and add a pg_hba line | ||||
|   # pam_role_name: zalandos | ||||
| 
 | ||||
|   # List of teams which members need the superuser role in each Postgres cluster | ||||
|   # postgres_superuser_teams: "postgres_superusers" | ||||
| 
 | ||||
|   # List of roles that cannot be overwritten by an application, team or infrastructure role | ||||
|   # protected_role_names: "admin" | ||||
| 
 | ||||
|   # role name to grant to team members created from the Teams API | ||||
|   # team_admin_role: "admin" | ||||
| 
 | ||||
|   # postgres config parameters to apply to each team member role | ||||
|   # team_api_role_configuration: "log_statement:all" | ||||
| 
 | ||||
|   # URL of the Teams API service | ||||
|   # teams_api_url: http://fake-teams-api.default.svc.cluster.local | ||||
| 
 | ||||
| # config exclusive to CRD | ||||
| configCRD: | ||||
|   etcd_host: "" | ||||
|   min_instances: -1 | ||||
|   max_instances: -1 | ||||
|   # sidecar_docker_images | ||||
|   #  example: "exampleimage:exampletag" | ||||
| 
 | ||||
| configKubernetesCRD: | ||||
|   cluster_labels: | ||||
|       application: spilo | ||||
|   cluster_name_label: cluster-name | ||||
|   enable_pod_antiaffinity: false | ||||
|   pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|   enable_pod_disruption_budget: true | ||||
|   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" | ||||
|   # inherited_labels: | ||||
|   # - application | ||||
|   # - app | ||||
|   # watched_namespace: "" | ||||
| 
 | ||||
| configLoadBalancerCRD: | ||||
|   # custom_service_annotations: | ||||
|   #   keyx: valuez | ||||
|   #   keya: valuea | ||||
|   enable_master_load_balancer: false | ||||
|   enable_replica_load_balancer: false | ||||
|   master_dns_name_format: "{cluster}.{team}.{hostedzone}" | ||||
|   replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" | ||||
| 
 | ||||
| configTeamsApiCRD: | ||||
|   enable_teams_api: false | ||||
|   enable_team_superuser: false | ||||
|   # pam_configuration: "" | ||||
|   pam_role_name: zalandos | ||||
|   # postgres_superuser_teams: "postgres_superusers" | ||||
|   protected_role_names: | ||||
|   - admin | ||||
|   team_admin_role: admin | ||||
|   team_api_role_configuration: | ||||
|     log_statement: all | ||||
|   # teams_api_url: "" | ||||
| 
 | ||||
| scalyr: | ||||
|   scalyr_cpu_request: 100m | ||||
|   scalyr_memory_request: 50Mi | ||||
|   scalyr_cpu_limit: "1" | ||||
|   scalyr_memory_limit: 1Gi | ||||
|   # scalyr_api_key: "" | ||||
|   # scalyr_image: "" | ||||
|   # scalyr_server_url: "" | ||||
| 
 | ||||
| rbac: | ||||
|   # Specifies whether RBAC resources should be created | ||||
|   create: true | ||||
|  |  | |||
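The two values files now mirror the same option names, but the value types differ with the target: the ConfigMap variant keeps everything as flat strings, while the CRD variant uses native YAML types. A side-by-side sketch for two options where the difference is most visible (placement follows the grouped sections above; treat it as illustrative):

# values.yaml (ConfigMap target): flat string forms
configKubernetes:
  cluster_labels: application:spilo
configTeamsApi:
  protected_role_names: "admin"

# values-crd.yaml (OperatorConfigurationCRD target): structured YAML
configKubernetes:
  cluster_labels:
    application: spilo
configTeamsApi:
  protected_role_names:
  - admin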
|  | @ -113,6 +113,16 @@ Those are top-level keys, containing both leaf keys and groups. | |||
| * **repair_period** | ||||
|   period between consecutive repair requests. The default is `5m`. | ||||
| 
 | ||||
| * **set_memory_request_to_limit** | ||||
|   Set `memory_request` to `memory_limit` for all Postgres clusters (the default | ||||
|   value is also increased). This prevents certain cases of memory overcommitment | ||||
|   at the cost of overprovisioning memory and potential scheduling problems for | ||||
|   containers with high memory limits due to the lack of memory on Kubernetes | ||||
|   cluster nodes. This affects all containers created by the operator (Postgres, | ||||
|   Scalyr sidecar, and other sidecars); to set resources for the operator's own | ||||
|   container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13). | ||||
|   The default is `false`. | ||||
| 
 | ||||
| ## Postgres users | ||||
| 
 | ||||
| Parameters describing Postgres users. In a CRD-configuration, they are grouped | ||||
|  | @ -296,16 +306,6 @@ CRD-based configuration. | |||
|   memory limits for the postgres containers, unless overridden by cluster-specific | ||||
|   settings. The default is `1Gi`. | ||||
| 
 | ||||
| * **set_memory_request_to_limit** | ||||
|   Set `memory_request` to `memory_limit` for all Postgres clusters (the default | ||||
|   value is also increased). This prevents certain cases of memory overcommitment | ||||
|   at the cost of overprovisioning memory and potential scheduling problems for | ||||
|   containers with high memory limits due to the lack of memory on Kubernetes | ||||
|   cluster nodes. This affects all containers created by the operator (Postgres, | ||||
|   Scalyr sidecar, and other sidecars); to set resources for the operator's own | ||||
|   container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13). | ||||
|   The default is `false`. | ||||
| 
 | ||||
| ## Operator timeouts | ||||
| 
 | ||||
| This set of parameters define various timeouts related to some operator | ||||
|  | @ -373,7 +373,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. | |||
|   with the hosted zone (the value of the `db_hosted_zone` parameter). No other | ||||
|   placeholders are allowed. | ||||
| 
 | ||||
| ** **replica_dns_name_format** defines the DNS name string template for the | ||||
| * **replica_dns_name_format** defines the DNS name string template for the | ||||
|   replica load balancer cluster.  The default is | ||||
|   `{cluster}-repl.{team}.{hostedzone}`, where `{cluster}` is replaced by the | ||||
|   cluster name, `{team}` is replaced with the team name and `{hostedzone}` is | ||||
|  | @ -478,7 +478,7 @@ key. | |||
|   `https://info.example.com/oauth2/tokeninfo?access_token= uid | ||||
|   realm=/employees`. | ||||
| 
 | ||||
| * **protected_roles** | ||||
| * **protected_role_names** | ||||
|   List of roles that cannot be overwritten by an application, team or | ||||
|   infrastructure role. The default is `admin`. | ||||
| 
 | ||||
|  | @ -528,23 +528,22 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the | |||
| * **scalyr_memory_limit** | ||||
|   Memory limit value for the Scalyr sidecar. The default is `1Gi`. | ||||
| 
 | ||||
| 
 | ||||
| ## Logical backup | ||||
| 
 | ||||
|   These parameters configure a k8s cron job managed by the operator to produce | ||||
|   Postgres logical backups. In the CRD-based configuration those parameters are | ||||
|   grouped under the `logical_backup` key. | ||||
| These parameters configure a k8s cron job managed by the operator to produce | ||||
| Postgres logical backups. In the CRD-based configuration those parameters are | ||||
| grouped under the `logical_backup` key. | ||||
| 
 | ||||
|   * **logical_backup_schedule** | ||||
|     Backup schedule in the cron format. Please take [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) into account. Default: "30 00 \* \* \*" | ||||
| * **logical_backup_schedule** | ||||
|   Backup schedule in the cron format. Please take [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) into account. Default: "30 00 \* \* \*" | ||||
| 
 | ||||
|   * **logical_backup_docker_image** | ||||
|     An image for pods of the logical backup job. The [example image](../../docker/logical-backup/Dockerfile) | ||||
|     runs `pg_dumpall` on a replica if possible and uploads compressed results to | ||||
|     an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`. | ||||
|     The default image is the same image built with the Zalando-internal CI | ||||
|     pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup" | ||||
| * **logical_backup_docker_image** | ||||
|   An image for pods of the logical backup job. The [example image](../../docker/logical-backup/Dockerfile) | ||||
|   runs `pg_dumpall` on a replica if possible and uploads compressed results to | ||||
|   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`. | ||||
|   The default image is the same image built with the Zalando-internal CI | ||||
|   pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup" | ||||
| 
 | ||||
|   * **logical_backup_s3_bucket** | ||||
|     S3 bucket to store backup results. The bucket has to be present and | ||||
|     accessible by Postgres pods. Default: empty. | ||||
| * **logical_backup_s3_bucket** | ||||
|   S3 bucket to store backup results. The bucket has to be present and | ||||
|   accessible by Postgres pods. Default: empty. | ||||
|  |  | |||
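The relocated set_memory_request_to_limit parameter is a general top-level option rather than a pod-resource default, which is why it moved up in the reference. How it would be switched on in either configuration style (a sketch):

# ConfigMap-based configuration: string value under data
data:
  set_memory_request_to_limit: "true"

# CRD-based configuration: native boolean at the top level of configuration
configuration:
  set_memory_request_to_limit: true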
|  | @ -1,9 +1,9 @@ | |||
| apiVersion: "acid.zalan.do/v1" | ||||
| kind: postgresql | ||||
| 
 | ||||
| metadata: | ||||
|   name: acid-test-cluster | ||||
| spec: | ||||
|   dockerImage: registry.opensource.zalan.do/acid/spilo-11:1.5-p9 | ||||
|   initContainers: | ||||
|   - name: date | ||||
|     image: busybox | ||||
|  | @ -11,9 +11,9 @@ spec: | |||
|   teamId: "ACID" | ||||
|   volume: | ||||
|     size: 1Gi | ||||
|     #storageClass: my-sc | ||||
| #   storageClass: my-sc | ||||
|   numberOfInstances: 2 | ||||
|   users: #Application/Robot users | ||||
|   users: # Application/Robot users | ||||
|     zalando: | ||||
|     - superuser | ||||
|     - createdb | ||||
|  | @ -23,8 +23,11 @@ spec: | |||
|   - 127.0.0.1/32 | ||||
|   databases: | ||||
|     foo: zalando | ||||
| #Expert section | ||||
| 
 | ||||
| # Expert section | ||||
| 
 | ||||
|   enableShmVolume: true | ||||
| # spiloFSGroup: 103 | ||||
|   postgresql: | ||||
|     version: "10" | ||||
|     parameters: | ||||
|  | @ -38,7 +41,6 @@ spec: | |||
|     limits: | ||||
|       cpu: 300m | ||||
|       memory: 300Mi | ||||
|   # spiloFSGroup: 103 | ||||
|   patroni: | ||||
|     initdb: | ||||
|       encoding: "UTF8" | ||||
|  | @ -47,42 +49,42 @@ spec: | |||
|     pg_hba: | ||||
|     - hostssl all all 0.0.0.0/0 md5 | ||||
|     - host    all all 0.0.0.0/0 md5 | ||||
|     #slots: | ||||
|     #  permanent_physical_1: | ||||
|     #    type: physical | ||||
|     #  permanent_logical_1: | ||||
|     #    type: logical | ||||
|     #    database: foo | ||||
|     #    plugin: pgoutput | ||||
| #   slots: | ||||
| #     permanent_physical_1: | ||||
| #       type: physical | ||||
| #     permanent_logical_1: | ||||
| #       type: logical | ||||
| #       database: foo | ||||
| #       plugin: pgoutput | ||||
|     ttl: 30 | ||||
|     loop_wait: &loop_wait 10 | ||||
|     retry_timeout: 10 | ||||
|     maximum_lag_on_failover: 33554432 | ||||
|   # restore a Postgres DB with point-in-time-recovery | ||||
|   # with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp | ||||
|   # with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup | ||||
|   # clone: | ||||
|   #  uid: "efd12e58-5786-11e8-b5a7-06148230260c" | ||||
|   #  cluster: "acid-batman" | ||||
|   #  timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6) | ||||
|   #  s3_wal_path: "s3://custom/path/to/bucket" | ||||
| # restore a Postgres DB with point-in-time-recovery | ||||
| # with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp | ||||
| # with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup | ||||
| #   clone: | ||||
| #     uid: "efd12e58-5786-11e8-b5a7-06148230260c" | ||||
| #     cluster: "acid-batman" | ||||
| #     timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6) | ||||
| #     s3_wal_path: "s3://custom/path/to/bucket" | ||||
| 
 | ||||
|   # run periodic backups with k8s cron jobs | ||||
|   # enableLogicalBackup: true | ||||
|   # logicalBackupSchedule: "30 00 * * *" | ||||
| # run periodic backups with k8s cron jobs | ||||
| #   enableLogicalBackup: true | ||||
| #   logicalBackupSchedule: "30 00 * * *" | ||||
|   maintenanceWindows: | ||||
|   - 01:00-06:00 # UTC | ||||
|   - Sat:00:00-04:00 | ||||
|   #sidecars: | ||||
|   #  - name: "telegraf-sidecar" | ||||
|   #    image: "telegraf:latest" | ||||
|   #    resources: | ||||
|   #      limits: | ||||
|   #        cpu: 500m | ||||
|   #        memory: 500Mi | ||||
|   #      requests: | ||||
|   #        cpu: 100m | ||||
|   #        memory: 100Mi | ||||
|   #    env: | ||||
|   #      - name: "USEFUL_VAR" | ||||
|   #        value: "perhaps-true" | ||||
| # sidecars: | ||||
| #   - name: "telegraf-sidecar" | ||||
| #     image: "telegraf:latest" | ||||
| #     resources: | ||||
| #       limits: | ||||
| #         cpu: 500m | ||||
| #         memory: 500Mi | ||||
| #       requests: | ||||
| #         cpu: 100m | ||||
| #         memory: 100Mi | ||||
| #     env: | ||||
| #       - name: "USEFUL_VAR" | ||||
| #         value: "perhaps-true" | ||||
|  |  | |||
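The commented expert options in the complete manifest now share a uniform comment column. Uncommented, the clone block would look roughly like this (a sketch; the UID, cluster name, timestamp, and bucket path are the placeholders from the example itself):

spec:
  clone:
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
    cluster: "acid-batman"
    # with a timestamp: point-in-time recovery from the latest S3 backup taken before it
    timestamp: "2017-12-19T12:40:33+01:00"
    # without a timestamp: pg_basebackup from the running "acid-batman" cluster
    # s3_wal_path: "s3://custom/path/to/bucket"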
|  | @ -3,62 +3,78 @@ kind: ConfigMap | |||
| metadata: | ||||
|   name: postgres-operator | ||||
| data: | ||||
|   watched_namespace: "*" # listen to all namespaces | ||||
|   cluster_labels: application:spilo | ||||
|   cluster_name_label: version | ||||
|   pod_role_label: spilo-role | ||||
| 
 | ||||
|   debug_logging: "true" | ||||
|   workers: "4" | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7 | ||||
|   pod_service_account_name: "zalando-postgres-operator" | ||||
|   secret_name_template: '{username}.{cluster}.credentials' | ||||
|   cluster_domain: cluster.local | ||||
|   super_username: postgres | ||||
|   enable_teams_api: "false" | ||||
|   spilo_privileged: "false" | ||||
|   # enable_shm_volume: "true" | ||||
|   # custom_service_annotations: | ||||
|   #   "keyx:valuez,keya:valuea" | ||||
|   # set_memory_request_to_limit: "true" | ||||
|   # postgres_superuser_teams: "postgres_superusers" | ||||
|   # enable_team_superuser: "false" | ||||
|   # team_admin_role: "admin" | ||||
|   # enable_admin_role_for_users: "true" | ||||
|   # teams_api_url: http://fake-teams-api.default.svc.cluster.local | ||||
|   # team_api_role_configuration: "log_statement:all" | ||||
|   # infrastructure_roles_secret_name: postgresql-infrastructure-roles | ||||
|   # oauth_token_secret_name: postgresql-operator | ||||
|   # pam_role_name: zalandos | ||||
|   # pam_configuration: | | ||||
|   #  https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees | ||||
|   # inherited_labels: "" | ||||
|   aws_region: eu-central-1 | ||||
|   # additional_secret_mount: "some-secret-name" | ||||
|   # additional_secret_mount_path: "/some/dir" | ||||
|   db_hosted_zone: db.example.com | ||||
|   master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' | ||||
|   replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}' | ||||
|   enable_master_load_balancer: "true" | ||||
|   enable_replica_load_balancer: "false" | ||||
| 
 | ||||
|   pdb_name_format: "postgres-{cluster}-pdb" | ||||
| 
 | ||||
|   api_port: "8080" | ||||
|   ring_log_lines: "100" | ||||
|   aws_region: eu-central-1 | ||||
|   cluster_domain: cluster.local | ||||
|   cluster_history_entries: "1000" | ||||
|   pod_terminate_grace_period: 5m | ||||
|   cluster_labels: application:spilo | ||||
|   cluster_name_label: version | ||||
|   # custom_service_annotations: | ||||
|   #   "keyx:valuez,keya:valuea" | ||||
|   db_hosted_zone: db.example.com | ||||
|   debug_logging: "true" | ||||
|   # default_cpu_limit: "3" | ||||
|   # default_cpu_request: 100m | ||||
|   # default_memory_limit: 1Gi | ||||
|   # default_memory_request: 100Mi | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9 | ||||
|   # enable_admin_role_for_users: "true" | ||||
|   # enable_database_access: "true" | ||||
|   enable_master_load_balancer: "true" | ||||
|   # enable_pod_antiaffinity: "false" | ||||
|   # enable_pod_disruption_budget: "true" | ||||
|   enable_replica_load_balancer: "false" | ||||
|   # enable_shm_volume: "true" | ||||
|   # enable_team_superuser: "false" | ||||
|   enable_teams_api: "false" | ||||
|   # etcd_host: "" | ||||
|   # infrastructure_roles_secret_name: postgresql-infrastructure-roles | ||||
|   # inherited_labels: "" | ||||
|   # kube_iam_role: "" | ||||
|   # log_s3_bucket: "" | ||||
|   # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|   # logical_backup_s3_bucket: "my-bucket-url" | ||||
|   # logical_backup_schedule: "30 00 * * *" | ||||
|   master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' | ||||
|   # master_pod_move_timeout: 10m | ||||
|   # max_instances: "-1" | ||||
|   # min_instances: "-1" | ||||
|   # node_readiness_label: "" | ||||
|   # oauth_token_secret_name: postgresql-operator | ||||
|   # pam_configuration: | | ||||
|   #  https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees | ||||
|   # pam_role_name: zalandos | ||||
|   pdb_name_format: "postgres-{cluster}-pdb" | ||||
|   # pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|   pod_deletion_wait_timeout: 10m | ||||
|   # pod_environment_configmap: "" | ||||
|   pod_label_wait_timeout: 10m | ||||
|   pod_management_policy: "ordered_ready" | ||||
|   pod_role_label: spilo-role | ||||
|   pod_service_account_name: "zalando-postgres-operator" | ||||
|   pod_terminate_grace_period: 5m | ||||
|   # postgres_superuser_teams: "postgres_superusers" | ||||
|   # protected_role_names: "admin" | ||||
|   ready_wait_interval: 3s | ||||
|   ready_wait_timeout: 30s | ||||
|   #  master_pod_move_timeout: 10m | ||||
|   repair_period: 5m | ||||
|   replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}' | ||||
|   replication_username: standby | ||||
|   resource_check_interval: 3s | ||||
|   resource_check_timeout: 10m | ||||
|   resync_period: 5m | ||||
| 
 | ||||
|   # logical_backup_schedule: "30 00 * * *" | ||||
|   # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|   # logical_backup_s3_bucket: "" | ||||
|   ring_log_lines: "100" | ||||
|   secret_name_template: '{username}.{cluster}.credentials' | ||||
|   # sidecar_docker_images: "" | ||||
|   # set_memory_request_to_limit: "false" | ||||
|   spilo_privileged: "false" | ||||
|   super_username: postgres | ||||
|   # team_admin_role: "admin" | ||||
|   # team_api_role_configuration: "log_statement:all" | ||||
|   # teams_api_url: http://fake-teams-api.default.svc.cluster.local | ||||
|   # toleration: "" | ||||
|   # wal_s3_bucket: "" | ||||
|   watched_namespace: "*" # listen to all namespaces | ||||
|   workers: "4" | ||||
|  |  | |||
|  | @ -9,16 +9,11 @@ spec: | |||
|     size: 1Gi | ||||
|   numberOfInstances: 2 | ||||
|   users: | ||||
|     # database owner | ||||
|     zalando: | ||||
|     zalando: # database owner | ||||
|     - superuser | ||||
|     - createdb | ||||
| 
 | ||||
|     # role for application foo | ||||
|     foo_user: [] | ||||
| 
 | ||||
|   #databases: name->owner | ||||
|     foo_user: [] # role for application foo | ||||
|   databases: | ||||
|     foo: zalando | ||||
|     foo: zalando # dbname: owner | ||||
|   postgresql: | ||||
|     version: "10" | ||||
|  |  | |||
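For the users declared in the minimal manifest, the operator creates one credentials Secret per role, named according to secret_name_template from the operator configuration. With the template shown elsewhere in this diff ({username}.{cluster}.credentials), and assuming the cluster is named acid-minimal-cluster as in the upstream example, the zalando owner would get a Secret roughly like this sketch:

apiVersion: v1
kind: Secret
metadata:
  # {username}.{cluster}.credentials with username=zalando, cluster=acid-minimal-cluster (assumed name)
  name: zalando.acid-minimal-cluster.credentials
type: Opaque
data:
  username: emFsYW5kbw==   # base64("zalando")
  password: ...            # random value generated by the operator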
|  | @ -4,57 +4,58 @@ metadata: | |||
|   name: postgresql-operator-default-configuration | ||||
| configuration: | ||||
|   etcd_host: "" | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7 | ||||
|   workers: 4 | ||||
|   min_instances: -1 | ||||
|   docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p9 | ||||
|   # enable_shm_volume: true | ||||
|   max_instances: -1 | ||||
|   min_instances: -1 | ||||
|   resync_period: 30m | ||||
|   repair_period: 5m | ||||
|   # enable_shm_volume: true | ||||
| 
 | ||||
|   #sidecar_docker_images: | ||||
|   # example: "exampleimage:exampletag" | ||||
|   # set_memory_request_to_limit: false | ||||
|   # sidecar_docker_images: | ||||
|   #   example: "exampleimage:exampletag" | ||||
|   workers: 4 | ||||
|   users: | ||||
|     super_username: postgres | ||||
|     replication_username: standby | ||||
|     super_username: postgres | ||||
|   kubernetes: | ||||
|     pod_service_account_name: operator | ||||
|     pod_terminate_grace_period: 5m | ||||
|     pdb_name_format: "postgres-{cluster}-pdb" | ||||
|     enable_pod_disruption_budget: true | ||||
|     secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" | ||||
|     cluster_domain: cluster.local | ||||
|     oauth_token_secret_name: postgresql-operator | ||||
|     pod_role_label: spilo-role | ||||
|     # spilo_fsgroup: 103 | ||||
|     spilo_privileged: false | ||||
|     cluster_labels: | ||||
|         application: spilo | ||||
|     cluster_name_label: cluster-name | ||||
|     enable_pod_antiaffinity: false | ||||
|     enable_pod_disruption_budget: true | ||||
|     # infrastructure_roles_secret_name: "" | ||||
|     # inherited_labels: | ||||
|     # - application | ||||
|     # - app | ||||
|     cluster_name_label: cluster-name | ||||
|     # watched_namespace:"" | ||||
|     # node_readiness_label: "" | ||||
|     # toleration: {} | ||||
|     # infrastructure_roles_secret_name: "" | ||||
|     oauth_token_secret_name: postgresql-operator | ||||
|     pdb_name_format: "postgres-{cluster}-pdb" | ||||
|     pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|     # pod_environment_configmap: "" | ||||
|     pod_management_policy: "ordered_ready" | ||||
|     enable_pod_antiaffinity: false | ||||
|     pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||
|     pod_role_label: spilo-role | ||||
|     pod_service_account_name: operator | ||||
|     pod_terminate_grace_period: 5m | ||||
|     secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" | ||||
|     # spilo_fsgroup: 103 | ||||
|     spilo_privileged: false | ||||
|     # toleration: {} | ||||
|     # watched_namespace: "" | ||||
|   postgres_pod_resources: | ||||
|     default_cpu_request: 100m | ||||
|     default_memory_request: 100Mi | ||||
|     default_cpu_limit: "3" | ||||
|     default_cpu_request: 100m | ||||
|     default_memory_limit: 1Gi | ||||
|     default_memory_request: 100Mi | ||||
|   timeouts: | ||||
|     resource_check_interval: 3s | ||||
|     resource_check_timeout: 10m | ||||
|     pod_label_wait_timeout: 10m | ||||
|     pod_deletion_wait_timeout: 10m | ||||
|     ready_wait_interval: 4s | ||||
|     ready_wait_timeout: 30s | ||||
|     resource_check_interval: 3s | ||||
|     resource_check_timeout: 10m | ||||
|   load_balancer: | ||||
|     # db_hosted_zone: "" | ||||
|     enable_master_load_balancer: false | ||||
|     enable_replica_load_balancer: false | ||||
|     # custom_service_annotations: | ||||
|  | @ -63,41 +64,41 @@ configuration: | |||
|     master_dns_name_format: "{cluster}.{team}.{hostedzone}" | ||||
|     replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" | ||||
|   aws_or_gcp: | ||||
|     # db_hosted_zone: "" | ||||
|     # wal_s3_bucket: "" | ||||
|     # log_s3_bucket: "" | ||||
|     # kube_iam_role: "" | ||||
|     aws_region: eu-central-1 | ||||
|     # additional_secret_mount: "some-secret-name" | ||||
|     # additional_secret_mount_path: "/some/dir" | ||||
|     aws_region: eu-central-1 | ||||
|     # kube_iam_role: "" | ||||
|     # log_s3_bucket: "" | ||||
|     # wal_s3_bucket: "" | ||||
|   logical_backup: | ||||
|     logical_backup_schedule: "30 00 * * *" | ||||
|     logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|     logical_backup_s3_bucket: "my-bucket-url" | ||||
|   debug: | ||||
|     debug_logging: true | ||||
|     enable_database_access: true | ||||
|   teams_api: | ||||
|     enable_teams_api: false | ||||
|     team_api_role_configuration: | ||||
|       log_statement: all | ||||
|     # enable_admin_role_for_users: true | ||||
|     enable_team_superuser: false | ||||
|     team_admin_role: admin | ||||
|     pam_role_name: zalandos | ||||
|     enable_teams_api: false | ||||
|     # pam_configuration: "" | ||||
|     pam_role_name: zalandos | ||||
|     # postgres_superuser_teams: "postgres_superusers" | ||||
|     protected_role_names: | ||||
|       - admin | ||||
|     team_admin_role: admin | ||||
|     team_api_role_configuration: | ||||
|       log_statement: all | ||||
|     # teams_api_url: "" | ||||
|     # postgres_superuser_teams: "postgres_superusers" | ||||
|   logging_rest_api: | ||||
|     api_port: 8008 | ||||
|     ring_log_lines: 100 | ||||
|     cluster_history_entries: 1000 | ||||
|     ring_log_lines: 100 | ||||
|   scalyr: | ||||
|     scalyr_cpu_request: 100m | ||||
|     scalyr_memory_request: 50Mi | ||||
|     scalyr_cpu_limit: "1" | ||||
|     scalyr_memory_limit: 1Gi | ||||
|     # scalyr_api_key: "" | ||||
|     scalyr_cpu_limit: "1" | ||||
|     scalyr_cpu_request: 100m | ||||
|     # scalyr_image: "" | ||||
|     scalyr_memory_limit: 1Gi | ||||
|     scalyr_memory_request: 50Mi | ||||
|     # scalyr_server_url: "" | ||||
|   logical_backup: | ||||
|     logical_backup_schedule: "30 00 * * *" | ||||
|     logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup" | ||||
|     logical_backup_s3_bucket: "" | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ spec: | |||
|   numberOfInstances: 1 | ||||
|   postgresql: | ||||
|     version: "10" | ||||
|   # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming. | ||||
| # Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming. | ||||
|   standby: | ||||
|     s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/" | ||||
| 
 | ||||
|  |  | |||
|  | @ -121,6 +121,7 @@ type TeamsAPIConfiguration struct { | |||
| 	TeamsAPIUrl              string            `json:"teams_api_url,omitempty"` | ||||
| 	TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` | ||||
| 	EnableTeamSuperuser      bool              `json:"enable_team_superuser,omitempty"` | ||||
| 	EnableAdminRoleForUsers  bool              `json:"enable_admin_role_for_users,omitempty"` | ||||
| 	TeamAdminRole            string            `json:"team_admin_role,omitempty"` | ||||
| 	PamRoleName              string            `json:"pam_role_name,omitempty"` | ||||
| 	PamConfiguration         string            `json:"pam_configuration,omitempty"` | ||||
|  | @ -155,12 +156,12 @@ type OperatorConfigurationData struct { | |||
| 	MaxInstances               int32                              `json:"max_instances,omitempty"` | ||||
| 	ResyncPeriod               Duration                           `json:"resync_period,omitempty"` | ||||
| 	RepairPeriod               Duration                           `json:"repair_period,omitempty"` | ||||
| 	SetMemoryRequestToLimit    bool                               `json:"set_memory_request_to_limit,omitempty"` | ||||
| 	ShmVolume                  *bool                              `json:"enable_shm_volume,omitempty"` | ||||
| 	Sidecars                   map[string]string                  `json:"sidecar_docker_images,omitempty"` | ||||
| 	PostgresUsersConfiguration PostgresUsersConfiguration         `json:"users"` | ||||
| 	Kubernetes                 KubernetesMetaConfiguration        `json:"kubernetes"` | ||||
| 	PostgresPodResources       PostgresPodResourcesDefaults       `json:"postgres_pod_resources"` | ||||
| 	SetMemoryRequestToLimit    bool                               `json:"set_memory_request_to_limit,omitempty"` | ||||
| 	Timeouts                   OperatorTimeouts                   `json:"timeouts"` | ||||
| 	LoadBalancer               LoadBalancerConfiguration          `json:"load_balancer"` | ||||
| 	AWSGCP                     AWSGCPConfiguration                `json:"aws_or_gcp"` | ||||
|  |  | |||
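The Teams API struct gains an EnableAdminRoleForUsers field with an enable_admin_role_for_users JSON tag, so the new flag flows through the same unmarshalling path as its neighbours. A minimal, self-contained sketch using a trimmed-down copy of the struct for illustration (the real type has more fields):

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down illustration of the Teams API section; not the operator's full type.
type teamsAPIConfig struct {
    EnableTeamSuperuser     bool   `json:"enable_team_superuser,omitempty"`
    EnableAdminRoleForUsers bool   `json:"enable_admin_role_for_users,omitempty"`
    TeamAdminRole           string `json:"team_admin_role,omitempty"`
}

func main() {
    raw := []byte(`{"enable_admin_role_for_users": true, "team_admin_role": "admin"}`)
    var cfg teamsAPIConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // {EnableTeamSuperuser:false EnableAdminRoleForUsers:true TeamAdminRole:admin}
}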
|  | @ -60,7 +60,7 @@ type PostgresSpec struct { | |||
| 	LogicalBackupSchedule string               `json:"logicalBackupSchedule,omitempty"` | ||||
| 	StandbyCluster        *StandbyDescription  `json:"standby"` | ||||
| 
 | ||||
| 	// deprectaed json tags
 | ||||
| 	// deprecated json tags
 | ||||
| 	InitContainersOld       []v1.Container `json:"init_containers,omitempty"` | ||||
| 	PodPriorityClassNameOld string         `json:"pod_priority_class_name,omitempty"` | ||||
| } | ||||
|  |  | |||
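The StandbyCluster pointer in this hunk is what the standby section of the example manifest earlier feeds into. A rough sketch of that mapping, with hypothetical stand-ins for the operator's StandbyDescription and spec types:

package main

import (
    "encoding/json"
    "fmt"
)

// Hypothetical stand-in for the operator's StandbyDescription type.
type standbyDescription struct {
    S3WalPath string `json:"s3_wal_path,omitempty"`
}

// Minimal slice of the Postgres spec, only what this example needs.
type postgresSpec struct {
    NumberOfInstances int32               `json:"numberOfInstances"`
    StandbyCluster    *standbyDescription `json:"standby,omitempty"`
}

func main() {
    // JSON equivalent of the standby fields in the manifest above.
    raw := []byte(`{
        "numberOfInstances": 1,
        "standby": {"s3_wal_path": "s3://path/to/bucket/containing/wal/of/source/cluster/"}
    }`)
    var spec postgresSpec
    if err := json.Unmarshal(raw, &spec); err != nil {
        panic(err)
    }
    if spec.StandbyCluster != nil {
        fmt.Println("standby WAL path:", spec.StandbyCluster.S3WalPath)
    }
}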
|  | @ -24,6 +24,7 @@ func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, con | |||
| func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config { | ||||
| 	result := &config.Config{} | ||||
| 
 | ||||
| 	// general config
 | ||||
| 	result.EtcdHost = fromCRD.EtcdHost | ||||
| 	result.DockerImage = fromCRD.DockerImage | ||||
| 	result.Workers = fromCRD.Workers | ||||
|  | @ -31,12 +32,15 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.MaxInstances = fromCRD.MaxInstances | ||||
| 	result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod) | ||||
| 	result.RepairPeriod = time.Duration(fromCRD.RepairPeriod) | ||||
| 	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit | ||||
| 	result.ShmVolume = fromCRD.ShmVolume | ||||
| 	result.Sidecars = fromCRD.Sidecars | ||||
| 
 | ||||
| 	// user config
 | ||||
| 	result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername | ||||
| 	result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername | ||||
| 
 | ||||
| 	// kubernetes config
 | ||||
| 	result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName | ||||
| 	result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition | ||||
| 	result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition | ||||
|  | @ -59,16 +63,16 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName | ||||
| 	result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy | ||||
| 	result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout | ||||
| 
 | ||||
| 	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity | ||||
| 	result.PodAntiAffinityTopologyKey = fromCRD.Kubernetes.PodAntiAffinityTopologyKey | ||||
| 
 | ||||
| 	// Postgres Pod resources
 | ||||
| 	result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest | ||||
| 	result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest | ||||
| 	result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit | ||||
| 	result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit | ||||
| 	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit | ||||
| 
 | ||||
| 	// timeout config
 | ||||
| 	result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval) | ||||
| 	result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout) | ||||
| 	result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout) | ||||
|  | @ -76,6 +80,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval) | ||||
| 	result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout) | ||||
| 
 | ||||
| 	// load balancer config
 | ||||
| 	result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone | ||||
| 	result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer | ||||
| 	result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer | ||||
|  | @ -83,6 +88,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat | ||||
| 	result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat | ||||
| 
 | ||||
| 	// AWS or GCP config
 | ||||
| 	result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket | ||||
| 	result.AWSRegion = fromCRD.AWSGCP.AWSRegion | ||||
| 	result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket | ||||
|  | @ -90,20 +96,33 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount | ||||
| 	result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath | ||||
| 
 | ||||
| 	// logical backup config
 | ||||
| 	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule | ||||
| 	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage | ||||
| 	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket | ||||
| 
 | ||||
| 	// debug config
 | ||||
| 	result.DebugLogging = fromCRD.OperatorDebug.DebugLogging | ||||
| 	result.EnableDBAccess = fromCRD.OperatorDebug.EnableDBAccess | ||||
| 
 | ||||
| 	// Teams API config
 | ||||
| 	result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI | ||||
| 	result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl | ||||
| 	result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration | ||||
| 	result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser | ||||
| 	result.EnableAdminRoleForUsers = fromCRD.TeamsAPI.EnableAdminRoleForUsers | ||||
| 	result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole | ||||
| 	result.PamRoleName = fromCRD.TeamsAPI.PamRoleName | ||||
| 	result.PamConfiguration = fromCRD.TeamsAPI.PamConfiguration | ||||
| 	result.ProtectedRoles = fromCRD.TeamsAPI.ProtectedRoles | ||||
| 	result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams | ||||
| 
 | ||||
| 	// logging REST API config
 | ||||
| 	result.APIPort = fromCRD.LoggingRESTAPI.APIPort | ||||
| 	result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines | ||||
| 	result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries | ||||
| 
 | ||||
| 	// Scalyr config
 | ||||
| 	result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey | ||||
| 	result.ScalyrImage = fromCRD.Scalyr.ScalyrImage | ||||
| 	result.ScalyrServerURL = fromCRD.Scalyr.ScalyrServerURL | ||||
|  | @ -112,9 +131,5 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | |||
| 	result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit | ||||
| 	result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit | ||||
| 
 | ||||
| 	result.LogicalBackupSchedule = fromCRD.LogicalBackup.Schedule | ||||
| 	result.LogicalBackupDockerImage = fromCRD.LogicalBackup.DockerImage | ||||
| 	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket | ||||
| 
 | ||||
| 	return result | ||||
| } | ||||
|  |  | |||
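importConfigurationFromCRD converts ResyncPeriod, RepairPeriod and the timeout fields with time.Duration(...), which suggests the CRD-facing Duration type wraps time.Duration and does its own unmarshalling. A minimal sketch of such a wrapper, assuming string durations like "5m" in the CRD; the operator's actual implementation may differ:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Duration is a sketch of a time.Duration wrapper that accepts strings like "5m".
type Duration time.Duration

func (d *Duration) UnmarshalJSON(b []byte) error {
    var s string
    if err := json.Unmarshal(b, &s); err != nil {
        return err
    }
    parsed, err := time.ParseDuration(s)
    if err != nil {
        return err
    }
    *d = Duration(parsed)
    return nil
}

func main() {
    var cfg struct {
        ResyncPeriod Duration `json:"resync_period,omitempty"`
    }
    if err := json.Unmarshal([]byte(`{"resync_period": "5m"}`), &cfg); err != nil {
        panic(err)
    }
    // The same conversion the importer applies: back to a plain time.Duration.
    fmt.Println(time.Duration(cfg.ResyncPeriod)) // 5m0s
}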
|  | @ -85,7 +85,7 @@ type Config struct { | |||
| 
 | ||||
| 	WatchedNamespace string            `name:"watched_namespace"`    // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 | ||||
| 	EtcdHost         string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS
 | ||||
| 	DockerImage      string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p7"` | ||||
| 	DockerImage      string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p9"` | ||||
| 	Sidecars         map[string]string `name:"sidecar_docker_images"` | ||||
| 	// default name `operator` enables backward compatibility with the older ServiceAccountName field
 | ||||
| 	PodServiceAccountName string `name:"pod_service_account_name" default:"operator"` | ||||
|  |  | |||
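The Config fields in this last hunk carry name and default struct tags (including the bumped Spilo default for docker_image). The sketch below shows how tags of that shape can be read with reflection to seed defaults and to match ConfigMap keys; it illustrates the tag convention only and is not the operator's actual loader:

package main

import (
    "fmt"
    "reflect"
)

// Illustrative struct using the same tag convention as the Config fields above.
type sketchConfig struct {
    DockerImage           string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-11:1.5-p9"`
    PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
}

func main() {
    cfg := sketchConfig{}
    v := reflect.ValueOf(&cfg).Elem()
    t := v.Type()
    for i := 0; i < t.NumField(); i++ {
        field := t.Field(i)
        key := field.Tag.Get("name")
        def := field.Tag.Get("default")
        // Seed the default when nothing has been set yet.
        if v.Field(i).Kind() == reflect.String && v.Field(i).String() == "" {
            v.Field(i).SetString(def)
        }
        fmt.Printf("%s -> %s\n", key, v.Field(i).String())
    }
}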