apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster
#  labels:
#    environment: demo
#  annotations:
#    "acid.zalan.do/controller": "second-operator"
spec:
  dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  teamId: "acid"
  numberOfInstances: 2
  users:  # Application/Robot users
    zalando:
    - superuser
    - createdb
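  # The operator creates these roles in the database and stores their generated passwords in
  # Kubernetes Secrets (typically named <username>.<clustername>.credentials.<kind>.<apigroup>
  # with default operator settings).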
  enableMasterLoadBalancer: false
  enableReplicaLoadBalancer: false
#  enableConnectionPooler: true  # not needed when connectionPooler section is present (see below)
  allowedSourceRanges:  # load balancers' source ranges for both master and replica services
  - 127.0.0.1/32
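  # 127.0.0.1/32 effectively blocks all external traffic; replace it with your trusted CIDRs
  # before enabling either load balancer.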
  databases:
    foo: zalando
  postgresql:
    version: "12"
    parameters:  # Expert section
      shared_buffers: "32MB"
      max_connections: "10"
      log_statement: "all"
  volume:
    size: 1Gi
#    storageClass: my-sc
  additionalVolumes:
    - name: data
      mountPath: /home/postgres/pgdata/partitions
      targetContainers:
        - postgres
      volumeSource:
        persistentVolumeClaim:
          claimName: pvc-postgresql-data-partitions
          readOnly: false
    - name: conf
      mountPath: /etc/telegraf
      subPath: telegraf.conf
      targetContainers:
        - telegraf-sidecar
      volumeSource:
        configMap:
          name: my-config-map
    - name: empty
      mountPath: /opt/empty
      targetContainers:
        - all
      volumeSource:
        emptyDir: {}
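    # Note: emptyDir volumes are created per pod; their contents are lost when the pod is deleted.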

  enableShmVolume: true
#  spiloFSGroup: 103
#  podAnnotations:
#    annotation.key: value
#  serviceAnnotations:
#    annotation.key: value
#  podPriorityClassName: "spilo-pod-priority"
#  tolerations:
#  - key: postgres
#    operator: Exists
#    effect: NoSchedule
  resources:
    requests:
      cpu: 10m
      memory: 100Mi
    limits:
      cpu: 500m
      memory: 500Mi
  patroni:
    initdb:
      encoding: "UTF8"
      locale: "en_US.UTF-8"
      data-checksums: "true"
    pg_hba:
    - hostssl all all 0.0.0.0/0 md5
    - host    all all 0.0.0.0/0 md5
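    # pg_hba rules are matched top to bottom and the first matching line decides the
    # authentication method, so keep the stricter rules first.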
#    slots:
#      permanent_physical_1:
#        type: physical
#      permanent_logical_1:
#        type: logical
#        database: foo
#        plugin: pgoutput
    ttl: 30
    loop_wait: &loop_wait 10
    retry_timeout: 10
    synchronous_mode: false
    synchronous_mode_strict: false
    maximum_lag_on_failover: 33554432
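    # 33554432 bytes = 32 MiB: a replica lagging behind by more than this is not
    # considered for promotion during failover.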

# Restore a Postgres DB with point-in-time recovery:
# with a non-empty timestamp, clone from an S3 bucket using the latest backup taken before that timestamp;
# with an empty or absent timestamp, clone from an existing, running cluster using pg_basebackup.
#  clone:
#    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
#    cluster: "acid-batman"
#    timestamp: "2017-12-19T12:40:33+01:00"  # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
#    s3_wal_path: "s3://custom/path/to/bucket"

# Run periodic logical backups via Kubernetes cron jobs
#  enableLogicalBackup: true
#  logicalBackupSchedule: "30 00 * * *"
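# "30 00 * * *" is standard cron syntax (minute hour day-of-month month day-of-week),
# i.e. the backup job runs daily at 00:30.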

#  maintenanceWindows:
#  - 01:00-06:00  # UTC
#  - Sat:00:00-04:00

  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"
    schema: "pooler"
    user: "pooler"
    resources:
      requests:
        cpu: 300m
        memory: 100Mi
      limits:
        cpu: "1"
        memory: 100Mi
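  # The operator's bundled connection pooler is PgBouncer-based; with "transaction" mode above,
  # session-level features (prepared statements, advisory locks, etc.) are not available to clients.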

  initContainers:
  - name: date
    image: busybox
    command: [ "/bin/date" ]
#  sidecars:
#    - name: "telegraf-sidecar"
#      image: "telegraf:latest"
#      resources:
#        limits:
#          cpu: 500m
#          memory: 500Mi
#        requests:
#          cpu: 100m
#          memory: 100Mi
#      env:
#        - name: "USEFUL_VAR"
#          value: "perhaps-true"

# Custom TLS certificate. Disabled unless tls.secretName has a value.
  tls:
    secretName: ""  # should correspond to a Kubernetes Secret resource to load
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
    caFile: ""  # optionally configure Postgres with a CA certificate
    caSecretName: ""  # optionally, the ca.crt can come from this secret instead
# File names can also be given as absolute paths; they are then no longer relative
# to the "/tls/" path where the secret is mounted by default, or to "/tlsca/"
# where the caSecret is mounted by default.
# When TLS is enabled, also set the spiloFSGroup parameter above to the relevant value.
# If unknown, set it to 103, which is the usual value in the default Spilo images.
# On OpenShift there is no need to set spiloFSGroup/spilo_fsgroup.
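#
# For reference, a minimal sketch of the Kubernetes Secret that tls.secretName could point at.
# The name "acid-test-cluster-tls" is illustrative; the data keys match the default
# certificateFile/privateKeyFile names above:
#
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: acid-test-cluster-tls
#   type: kubernetes.io/tls
#   data:
#     tls.crt: <base64-encoded server certificate>
#     tls.key: <base64-encoded private key>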