Merge branch 'master' of https://github.com/zalando/postgres-operator into cross_namespace_secret
Commit: b2171beb67
@@ -9,7 +9,7 @@ assignees: ''

 Please, answer some short questions which should help us to understand your problem / question better?

-- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.2
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.3
 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
 - **Are you running Postgres Operator in production?** [yes | no]
 - **Type of issue?** [Bug report, question, feature request, etc.]
@@ -66,7 +66,7 @@ We introduce the major version into the backup path to smoothen the [major versi

 The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.
 This comes at potential performance costs and should be disabled after a few days.

-The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p6`
+The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p7`

 The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
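For reference, a minimal sketch of how this compatibility flag could be set, using the OperatorConfiguration CRD format that appears later in this diff. Placing the flag at the top level of `configuration` is an assumption, suggested by `importConfigurationFromCRD` reading `fromCRD.EnableSpiloWalPathCompat` directly:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  # assumed top-level general option; enable during migration,
  # then disable after a few days (see note above on performance cost)
  enable_spilo_wal_path_compat: true
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
```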
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator-ui
-version: 1.6.2
-appVersion: 1.6.2
+version: 1.6.3
+appVersion: 1.6.3
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v1
+    appVersion: 1.6.3
+    created: "2021-05-27T19:04:33.425637932+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    digest: 08b810aa632dcc719e4785ef184e391267f7c460caa99677f2d00719075aac78
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.6.3.tgz
+    version: 1.6.3
   - apiVersion: v1
     appVersion: 1.6.2
-    created: "2021-04-06T16:47:40.993908218+02:00"
+    created: "2021-05-27T19:04:33.422124263+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: 14d1559bb0bd1e1e828f2daaaa6f6ac9ffc268d79824592c3589b55dd39241f6
     home: https://github.com/zalando/postgres-operator
@@ -25,7 +47,7 @@ entries:
     version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-04-06T16:47:40.993378451+02:00"
+    created: "2021-05-27T19:04:33.419640902+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: 3d321352f2f1e7bb7450aa8876e3d818aa9f9da9bd4250507386f0490f2c1969
     home: https://github.com/zalando/postgres-operator
@@ -47,7 +69,7 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-04-06T16:47:40.992871656+02:00"
+    created: "2021-05-27T19:04:33.41788193+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: 1e0aa1e7db3c1daa96927ffbf6fdbcdb434562f961833cb5241ddbe132220ee4
     home: https://github.com/zalando/postgres-operator
@@ -69,7 +91,7 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-04-06T16:47:40.992346484+02:00"
+    created: "2021-05-27T19:04:33.416056821+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
     home: https://github.com/zalando/postgres-operator
@@ -89,4 +111,4 @@ entries:
     urls:
     - postgres-operator-ui-1.5.0.tgz
     version: 1.5.0
-generated: "2021-04-06T16:47:40.991668273+02:00"
+generated: "2021-05-27T19:04:33.41380858+02:00"
Binary file not shown.
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.6.2
+  tag: v1.6.3
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator
-version: 1.6.2
-appVersion: 1.6.2
+version: 1.6.3
+appVersion: 1.6.3
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -65,7 +65,7 @@ spec:
       properties:
         docker_image:
           type: string
-          default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p6"
+          default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7"
         enable_crd_validation:
           type: boolean
           default: true
@@ -397,7 +397,7 @@ spec:
       properties:
         logical_backup_docker_image:
           type: string
-          default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+          default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
        logical_backup_google_application_credentials:
           type: string
        logical_backup_job_prefix:
@@ -27,7 +27,7 @@ spec:
   additionalPrinterColumns:
   - name: Team
     type: string
-    description: Team responsible for Postgres CLuster
+    description: Team responsible for Postgres cluster
    jsonPath: .spec.teamId
   - name: Version
     type: string
@@ -1,9 +1,30 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v1
+    appVersion: 1.6.3
+    created: "2021-05-27T19:04:25.199523943+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    digest: ea08f991bf23c9ad114bca98ebcbe3e2fa15beab163061399394905eaee89b35
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.6.3.tgz
+    version: 1.6.3
   - apiVersion: v1
     appVersion: 1.6.2
-    created: "2021-03-30T17:00:50.171986449+02:00"
+    created: "2021-05-27T19:04:25.198182197+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: d886f8a0879ca07d1e5246ee7bc55710e1c872f3977280fe495db6fc2057a7f4
     home: https://github.com/zalando/postgres-operator
@@ -24,7 +45,7 @@ entries:
     version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-03-30T17:00:50.170294515+02:00"
+    created: "2021-05-27T19:04:25.19687586+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: 4ba5972cd486dcaa2d11c5613a6f97f6b7b831822e610fe9e10a57ea1db23556
     home: https://github.com/zalando/postgres-operator
@@ -45,7 +66,7 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-03-30T17:00:50.168493689+02:00"
+    created: "2021-05-27T19:04:25.195600766+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: f52149718ea364f46b4b9eec9a65f6253ad182bb78df541d14cd5277b9c8a8c3
     home: https://github.com/zalando/postgres-operator
@@ -66,7 +87,7 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-03-30T17:00:50.166722286+02:00"
+    created: "2021-05-27T19:04:25.193985032+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
     home: https://github.com/zalando/postgres-operator
@@ -85,4 +106,4 @@ entries:
     urls:
     - postgres-operator-1.5.0.tgz
     version: 1.5.0
-generated: "2021-03-30T17:00:50.165166707+02:00"
+generated: "2021-05-27T19:04:25.191897769+02:00"
Binary file not shown.
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.6.2
+  tag: v1.6.3
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -32,7 +32,7 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit
@@ -265,7 +265,7 @@ configAwsOrGcp:
 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""

@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.6.2
+  tag: v1.6.3
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -35,7 +35,7 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: "false"
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: "-1"
   # max number of instances in Postgres cluster. -1 = no limit
@@ -255,7 +255,7 @@ configAwsOrGcp:
 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""

@@ -950,7 +950,7 @@ make docker

 # build in image in minikube docker env
 eval $(minikube docker-env)
-docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.2 .
+docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.3 .

 # apply UI manifests next to a running Postgres Operator
 kubectl apply -f manifests/
@@ -595,7 +595,7 @@ grouped under the `logical_backup` key.
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"

 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.
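For orientation, a sketch of overriding this default via the operator ConfigMap. Key names and defaults are taken from the configmap and operator-config hunks elsewhere in this diff; the values shown are illustrative:

```yaml
# excerpt from the operator ConfigMap data section (illustrative)
data:
  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
  logical_backup_provider: "s3"
  logical_backup_schedule: "30 00 * * *"  # operator default: daily at 00:30
```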
@@ -9,7 +9,7 @@ metadata:
   # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
   # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
 spec:
-  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
   teamId: "acid"
   numberOfInstances: 2
   users: # Application/Robot users
@@ -32,7 +32,7 @@ data:
   # default_memory_request: 100Mi
   # delete_annotation_date_key: delete-date
   # delete_annotation_name_key: delete-clustername
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
   # downscaler_annotations: "deployment-time,downscaler/*"
   # enable_admin_role_for_users: "true"
   # enable_crd_validation: "true"
@@ -64,7 +64,7 @@ data:
   # inherited_labels: application,environment
   # kube_iam_role: ""
   # log_s3_bucket: ""
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
   # logical_backup_google_application_credentials: ""
   logical_backup_job_prefix: "logical-backup-"
   logical_backup_provider: "s3"
@@ -61,7 +61,7 @@ spec:
       properties:
         docker_image:
           type: string
-          default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p6"
+          default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7"
         enable_crd_validation:
           type: boolean
           default: true
@@ -393,7 +393,7 @@ spec:
       properties:
         logical_backup_docker_image:
           type: string
-          default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+          default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
        logical_backup_google_application_credentials:
           type: string
        logical_backup_job_prefix:
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: postgres-operator
       containers:
       - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.2
+        image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.3
         imagePullPolicy: IfNotPresent
         resources:
           requests:
@@ -3,7 +3,7 @@ kind: OperatorConfiguration
 metadata:
   name: postgresql-operator-default-configuration
 configuration:
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
   # enable_crd_validation: true
   # enable_lazy_spilo_upgrade: false
   enable_pgversion_env_var: true
@@ -123,7 +123,7 @@ configuration:
   # wal_gs_bucket: ""
   # wal_s3_bucket: ""
   logical_backup:
-    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
+    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
     # logical_backup_google_application_credentials: ""
     logical_backup_job_prefix: "logical-backup-"
     logical_backup_provider: "s3"
@@ -23,7 +23,7 @@ spec:
   additionalPrinterColumns:
   - name: Team
     type: string
-    description: Team responsible for Postgres CLuster
+    description: Team responsible for Postgres cluster
    jsonPath: .spec.teamId
   - name: Version
     type: string
@@ -462,7 +462,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa

     if len(c.Statefulset.Spec.Template.Spec.Volumes) != len(statefulSet.Spec.Template.Spec.Volumes) {
         needsReplace = true
-        reasons = append(reasons, fmt.Sprintf("new statefulset's Volumes contains different number of volumes to the old one"))
+        reasons = append(reasons, "new statefulset's volumes contains different number of volumes to the old one")
     }

     // we assume any change in priority happens by rolling out a new priority class
@@ -476,7 +476,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa

     // lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
     // until they are re-created for other reasons, for example node rotation
-    if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
+    effectivePodImage := getPostgresContainer(&c.Statefulset.Spec.Template.Spec).Image
+    desiredImage := getPostgresContainer(&statefulSet.Spec.Template.Spec).Image
+    if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(effectivePodImage, desiredImage) {
         needsReplace = true
         reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one")
     }
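The lazy update path above only runs when the operator is configured for it; a minimal sketch of enabling it via the `enable_lazy_spilo_upgrade` option, which appears commented out in the default configuration manifest earlier in this diff (placement assumed analogous):

```yaml
# OperatorConfiguration excerpt (illustrative)
configuration:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
  # the statefulset receives the new image, while running pods keep the
  # old one until they are recreated for other reasons (e.g. node rotation)
  enable_lazy_spilo_upgrade: true
```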
@@ -66,10 +66,6 @@ type spiloConfiguration struct {
     Bootstrap pgBootstrap `json:"bootstrap"`
 }

-func (c *Cluster) containerName() string {
-    return "postgres"
-}
-
 func (c *Cluster) statefulSetName() string {
     return c.Name
 }
@@ -1157,10 +1153,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     c.logger.Debugf("Generating Spilo container, environment variables")
     c.logger.Debugf("%v", spiloEnvVars)

-    spiloContainer := generateContainer(c.containerName(),
+    spiloContainer := generateContainer(constants.PostgresContainerName,
         &effectiveDockerImage,
         resourceRequirements,
-        deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
+        deduplicateEnvVars(spiloEnvVars, constants.PostgresContainerName, c.logger),
         volumeMounts,
         c.OpConfig.Resources.SpiloPrivileged,
         c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
@@ -1392,6 +1388,9 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
 //
 // see https://docs.okd.io/latest/dev_guide/shared_memory.html
 func addShmVolume(podSpec *v1.PodSpec) {
+
+    postgresContainerIdx := 0
+
     volumes := append(podSpec.Volumes, v1.Volume{
         Name: constants.ShmVolumeName,
         VolumeSource: v1.VolumeSource{
@@ -1401,14 +1400,20 @@ func addShmVolume(podSpec *v1.PodSpec) {
         },
     })

-    pgIdx := constants.PostgresContainerIdx
-    mounts := append(podSpec.Containers[pgIdx].VolumeMounts,
+    for i, container := range podSpec.Containers {
+        if container.Name == constants.PostgresContainerName {
+            postgresContainerIdx = i
+        }
+    }
+
+    mounts := append(podSpec.Containers[postgresContainerIdx].VolumeMounts,
         v1.VolumeMount{
             Name:      constants.ShmVolumeName,
             MountPath: constants.ShmVolumePath,
         })

-    podSpec.Containers[0].VolumeMounts = mounts
+    podSpec.Containers[postgresContainerIdx].VolumeMounts = mounts

     podSpec.Volumes = volumes
 }

@@ -1439,54 +1444,55 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,

     volumes := podSpec.Volumes
     mountPaths := map[string]acidv1.AdditionalVolume{}
-    for i, v := range additionalVolumes {
-        if previousVolume, exist := mountPaths[v.MountPath]; exist {
+    for i, additionalVolume := range additionalVolumes {
+        if previousVolume, exist := mountPaths[additionalVolume.MountPath]; exist {
             msg := "Volume %+v cannot be mounted to the same path as %+v"
-            c.logger.Warningf(msg, v, previousVolume)
+            c.logger.Warningf(msg, additionalVolume, previousVolume)
             continue
         }

-        if v.MountPath == constants.PostgresDataMount {
+        if additionalVolume.MountPath == constants.PostgresDataMount {
             msg := "Cannot mount volume on postgresql data directory, %+v"
-            c.logger.Warningf(msg, v)
+            c.logger.Warningf(msg, additionalVolume)
             continue
         }

-        if v.TargetContainers == nil {
-            spiloContainer := podSpec.Containers[0]
-            additionalVolumes[i].TargetContainers = []string{spiloContainer.Name}
+        // if no target container is defined assign it to postgres container
+        if len(additionalVolume.TargetContainers) == 0 {
+            postgresContainer := getPostgresContainer(podSpec)
+            additionalVolumes[i].TargetContainers = []string{postgresContainer.Name}
         }

-        for _, target := range v.TargetContainers {
-            if target == "all" && len(v.TargetContainers) != 1 {
+        for _, target := range additionalVolume.TargetContainers {
+            if target == "all" && len(additionalVolume.TargetContainers) != 1 {
                 msg := `Target containers could be either "all" or a list
                 of containers, mixing those is not allowed, %+v`
-                c.logger.Warningf(msg, v)
+                c.logger.Warningf(msg, additionalVolume)
                 continue
             }
         }

         volumes = append(volumes,
             v1.Volume{
-                Name:         v.Name,
-                VolumeSource: v.VolumeSource,
+                Name:         additionalVolume.Name,
+                VolumeSource: additionalVolume.VolumeSource,
             },
         )

-        mountPaths[v.MountPath] = v
+        mountPaths[additionalVolume.MountPath] = additionalVolume
     }

     c.logger.Infof("Mount additional volumes: %+v", additionalVolumes)

     for i := range podSpec.Containers {
         mounts := podSpec.Containers[i].VolumeMounts
-        for _, v := range additionalVolumes {
-            for _, target := range v.TargetContainers {
+        for _, additionalVolume := range additionalVolumes {
+            for _, target := range additionalVolume.TargetContainers {
                 if podSpec.Containers[i].Name == target || target == "all" {
                     mounts = append(mounts, v1.VolumeMount{
-                        Name:      v.Name,
-                        MountPath: v.MountPath,
-                        SubPath:   v.SubPath,
+                        Name:      additionalVolume.Name,
+                        MountPath: additionalVolume.MountPath,
+                        SubPath:   additionalVolume.SubPath,
                     })
                 }
             }
@@ -425,6 +425,7 @@ func TestShmVolume(t *testing.T) {
                 Volumes: []v1.Volume{{}},
                 Containers: []v1.Container{
                     {
+                        Name: "postgres",
                         VolumeMounts: []v1.VolumeMount{
                             {},
                         },
@@ -436,9 +437,10 @@ func TestShmVolume(t *testing.T) {
     }
     for _, tt := range tests {
         addShmVolume(tt.podSpec)
+        postgresContainer := getPostgresContainer(tt.podSpec)

         volumeName := tt.podSpec.Volumes[tt.shmPos].Name
-        volumeMountName := tt.podSpec.Containers[0].VolumeMounts[tt.shmPos].Name
+        volumeMountName := postgresContainer.VolumeMounts[tt.shmPos].Name

         if volumeName != constants.ShmVolumeName {
             t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
@@ -610,8 +612,9 @@ func TestSecretVolume(t *testing.T) {
     for _, tt := range tests {
         additionalSecretMount := "aws-iam-s3-role"
         additionalSecretMountPath := "/meta/credentials"
+        postgresContainer := getPostgresContainer(tt.podSpec)

-        numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+        numMounts := len(postgresContainer.VolumeMounts)

         addSecretVolume(tt.podSpec, additionalSecretMount, additionalSecretMountPath)

@@ -631,7 +634,8 @@ func TestSecretVolume(t *testing.T) {
             }
         }

-        numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
+        postgresContainer = getPostgresContainer(tt.podSpec)
+        numMountsCheck := len(postgresContainer.VolumeMounts)

         if numMountsCheck != numMounts+1 {
             t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
@@ -863,7 +867,8 @@ func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole)
         "CONNECTION_POOLER_PORT": false,
     }

-    envs := podSpec.Spec.Containers[0].Env
+    container := getPostgresContainer(&podSpec.Spec)
+    envs := container.Env
     for _, env := range envs {
         required[env.Name] = true
     }
@@ -1043,180 +1048,132 @@ func TestTLS(t *testing.T) {
     }
     assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")

-    assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+    postgresContainer := getPostgresContainer(&sts.Spec.Template.Spec)
+    assert.Contains(t, postgresContainer.VolumeMounts, v1.VolumeMount{
         MountPath: "/tls",
         Name:      "my-secret",
     }, "the volume gets mounted in /tls")

-    assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
-    assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
-    assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
+    assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
+    assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
+    assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }

 func TestAdditionalVolume(t *testing.T) {
     testName := "TestAdditionalVolume"
-    tests := []struct {
-        subTest   string
-        podSpec   *v1.PodSpec
-        volumePos int
-    }{
-        {
-            subTest: "empty PodSpec",
-            podSpec: &v1.PodSpec{
-                Volumes: []v1.Volume{},
-                Containers: []v1.Container{
-                    {
-                        VolumeMounts: []v1.VolumeMount{},
-                    },
-                },
-            },
-            volumePos: 0,
-        },
-        {
-            subTest: "non empty PodSpec",
-            podSpec: &v1.PodSpec{
-                Volumes: []v1.Volume{{}},
-                Containers: []v1.Container{
-                    {
-                        Name: "postgres",
-                        VolumeMounts: []v1.VolumeMount{
-                            {
-                                Name:      "data",
-                                ReadOnly:  false,
-                                MountPath: "/data",
-                            },
-                        },
-                    },
-                },
-            },
-            volumePos: 1,
-        },
-        {
-            subTest: "non empty PodSpec with sidecar",
-            podSpec: &v1.PodSpec{
-                Volumes: []v1.Volume{{}},
-                Containers: []v1.Container{
-                    {
-                        Name: "postgres",
-                        VolumeMounts: []v1.VolumeMount{
-                            {
-                                Name:      "data",
-                                ReadOnly:  false,
-                                MountPath: "/data",
-                            },
-                        },
-                    },
-                    {
-                        Name: "sidecar",
-                        VolumeMounts: []v1.VolumeMount{
-                            {
-                                Name:      "data",
-                                ReadOnly:  false,
-                                MountPath: "/data",
-                            },
-                        },
-                    },
-                },
-            },
-            volumePos: 1,
-        },
-    }
+
+    client, _ := newFakeK8sTestClient()
+    clusterName := "acid-test-cluster"
+    namespace := "default"
+    sidecarName := "sidecar"
+    additionalVolumes := []acidv1.AdditionalVolume{
+        {
+            Name:             "test1",
+            MountPath:        "/test1",
+            TargetContainers: []string{"all"},
+            VolumeSource: v1.VolumeSource{
+                EmptyDir: &v1.EmptyDirVolumeSource{},
+            },
+        },
+        {
+            Name:             "test2",
+            MountPath:        "/test2",
+            TargetContainers: []string{sidecarName},
+            VolumeSource: v1.VolumeSource{
+                EmptyDir: &v1.EmptyDirVolumeSource{},
+            },
+        },
+        {
+            Name:             "test3",
+            MountPath:        "/test3",
+            TargetContainers: []string{}, // should mount only to postgres
+            VolumeSource: v1.VolumeSource{
+                EmptyDir: &v1.EmptyDirVolumeSource{},
+            },
+        },
+        {
+            Name:             "test4",
+            MountPath:        "/test4",
+            TargetContainers: nil, // should mount only to postgres
+            VolumeSource: v1.VolumeSource{
+                EmptyDir: &v1.EmptyDirVolumeSource{},
+            },
+        },
+    }
+
+    pg := acidv1.Postgresql{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      clusterName,
+            Namespace: namespace,
+        },
+        Spec: acidv1.PostgresSpec{
+            TeamID: "myapp", NumberOfInstances: 1,
+            Resources: acidv1.Resources{
+                ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+                ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+            },
+            Volume: acidv1.Volume{
+                Size: "1G",
+            },
+            AdditionalVolumes: additionalVolumes,
+            Sidecars: []acidv1.Sidecar{
+                {
+                    Name: sidecarName,
+                },
+            },
+        },
+    }

     var cluster = New(
         Config{
             OpConfig: config.Config{
                 ProtectedRoles: []string{"admin"},
                 Auth: config.Auth{
                     SuperUsername:       superUserName,
                     ReplicationUsername: replicationUserName,
                 },
                 PodManagementPolicy: "ordered_ready",
                 Resources: config.Resources{
                     ClusterLabels:        map[string]string{"application": "spilo"},
                     ClusterNameLabel:     "cluster-name",
                     DefaultCPURequest:    "300m",
                     DefaultCPULimit:      "300m",
                     DefaultMemoryRequest: "300Mi",
                     DefaultMemoryLimit:   "300Mi",
                     PodRoleLabel:         "spilo-role",
                 },
             },
-        }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+        }, client, pg, logger, eventRecorder)

-    for _, tt := range tests {
-        // Test with additional volume mounted in all containers
-        additionalVolumeMount := []acidv1.AdditionalVolume{
-            {
-                Name:             "test",
-                MountPath:        "/test",
-                TargetContainers: []string{"all"},
-                VolumeSource: v1.VolumeSource{
-                    EmptyDir: &v1.EmptyDirVolumeSource{},
-                },
-            },
-        }
+    // create a statefulset
+    sts, err := cluster.createStatefulSet()
+    assert.NoError(t, err)

-        numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
-
-        cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
-        volumeName := tt.podSpec.Volumes[tt.volumePos].Name
-
-        if volumeName != additionalVolumeMount[0].Name {
-            t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
-                testName, tt.subTest, additionalVolumeMount, volumeName)
-        }
-
-        for i := range tt.podSpec.Containers {
-            volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.volumePos].Name
-
-            if volumeMountName != additionalVolumeMount[0].Name {
-                t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
-                    testName, tt.subTest, additionalVolumeMount, volumeMountName)
-            }
-
-        }
-
-        numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
-
-        if numMountsCheck != numMounts+1 {
-            t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
-                numMountsCheck, numMounts+1)
-        }
+    tests := []struct {
+        subTest        string
+        container      string
+        expectedMounts []string
+    }{
+        {
+            subTest:        "checking volume mounts of postgres container",
+            container:      constants.PostgresContainerName,
+            expectedMounts: []string{"pgdata", "test1", "test3", "test4"},
+        },
+        {
+            subTest:        "checking volume mounts of sidecar container",
+            container:      "sidecar",
+            expectedMounts: []string{"pgdata", "test1", "test2"},
+        },
+    }

     for _, tt := range tests {
-        // Test with additional volume mounted only in first container
-        additionalVolumeMount := []acidv1.AdditionalVolume{
-            {
-                Name:             "test",
-                MountPath:        "/test",
-                TargetContainers: []string{"postgres"},
-                VolumeSource: v1.VolumeSource{
-                    EmptyDir: &v1.EmptyDirVolumeSource{},
-                },
-            },
-        }
+        for _, container := range sts.Spec.Template.Spec.Containers {
+            if container.Name != tt.container {
+                continue
+            }
+            mounts := []string{}
+            for _, volumeMounts := range container.VolumeMounts {
+                mounts = append(mounts, volumeMounts.Name)
+            }

-        numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
-
-        cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
-        volumeName := tt.podSpec.Volumes[tt.volumePos].Name
-
-        if volumeName != additionalVolumeMount[0].Name {
-            t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
-                testName, tt.subTest, additionalVolumeMount, volumeName)
-        }
-
-        for _, container := range tt.podSpec.Containers {
-            if container.Name == "postgres" {
-                volumeMountName := container.VolumeMounts[tt.volumePos].Name
-
-                if volumeMountName != additionalVolumeMount[0].Name {
-                    t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
-                        testName, tt.subTest, additionalVolumeMount, volumeMountName)
-                }
-
-                numMountsCheck := len(container.VolumeMounts)
-                if numMountsCheck != numMounts+1 {
-                    t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
-                        numMountsCheck, numMounts+1)
-                }
-            } else {
-                numMountsCheck := len(container.VolumeMounts)
-                if numMountsCheck == numMounts+1 {
-                    t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
-                        numMountsCheck, numMounts)
-                }
+            if !util.IsEqualIgnoreOrder(mounts, tt.expectedMounts) {
+                t.Errorf("%s %s: different volume mounts: got %v, epxected %v",
+                    testName, tt.subTest, mounts, tt.expectedMounts)
             }
         }
@@ -357,8 +357,8 @@ func (c *Cluster) syncStatefulSet() error {
     // and
     // (b) some of the pods were not restarted when the lazy update was still in place
     for _, pod := range pods {
-        effectivePodImage := pod.Spec.Containers[0].Image
-        stsImage := desiredSts.Spec.Template.Spec.Containers[0].Image
+        effectivePodImage := getPostgresContainer(&pod.Spec).Image
+        stsImage := getPostgresContainer(&desiredSts.Spec.Template.Spec).Image

         if stsImage != effectivePodImage {
             if err = c.markRollingUpdateFlagForPod(&pod, "pod not yet restarted due to lazy update"); err != nil {
@@ -227,6 +227,20 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
     }
 }

+func getPostgresContainer(podSpec *v1.PodSpec) (pgContainer v1.Container) {
+    for _, container := range podSpec.Containers {
+        if container.Name == constants.PostgresContainerName {
+            pgContainer = container
+        }
+    }
+
+    // if no postgres container was found, take the first one in the podSpec
+    if reflect.DeepEqual(pgContainer, v1.Container{}) && len(podSpec.Containers) > 0 {
+        pgContainer = podSpec.Containers[0]
+    }
+    return pgContainer
+}
+
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {

     if teamID == "" {
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
     result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
     result.EtcdHost = fromCRD.EtcdHost
     result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-    result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p6")
+    result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p7")
     result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
     result.MinInstances = fromCRD.MinInstances
     result.MaxInstances = fromCRD.MaxInstances
@@ -152,7 +152,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

     // logical backup config
     result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-    result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.2")
+    result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.3")
     result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
     result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
     result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
@@ -28,8 +28,8 @@ type Resources struct {
     PodLabelWaitTimeout     time.Duration `name:"pod_label_wait_timeout" default:"10m"`
     PodDeletionWaitTimeout  time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
     PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
-    SpiloRunAsUser          *int64        `name:"spilo_runasuser,omitempty"`
-    SpiloRunAsGroup         *int64        `name:"spilo_runasgroup,omitempty"`
+    SpiloRunAsUser          *int64        `name:"spilo_runasuser"`
+    SpiloRunAsGroup         *int64        `name:"spilo_runasgroup"`
     SpiloFSGroup            *int64        `name:"spilo_fsgroup"`
     PodPriorityClassName    string        `name:"pod_priority_class_name"`
     ClusterDomain           string        `name:"cluster_domain" default:"cluster.local"`
@@ -114,7 +114,7 @@ type Scalyr struct {

 // LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
     LogicalBackupSchedule    string `name:"logical_backup_schedule" default:"30 00 * * *"`
-    LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.2"`
+    LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.3"`
     LogicalBackupProvider    string `name:"logical_backup_provider" default:"s3"`
     LogicalBackupS3Bucket    string `name:"logical_backup_s3_bucket" default:""`
     LogicalBackupS3Region    string `name:"logical_backup_s3_region" default:""`
@@ -152,7 +152,7 @@ type Config struct {
     WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
     KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
     EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-    DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p6"`
+    DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p7"`
     SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
     SidecarContainers []v1.Container `name:"sidecars"`
     PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
@ -5,7 +5,6 @@ import "time"
|
|||
// General kubernetes-related constants
|
||||
const (
|
||||
PostgresContainerName = "postgres"
|
||||
PostgresContainerIdx = 0
|
||||
K8sAPIPath = "/apis"
|
||||
|
||||
QueueResyncPeriodPod = 5 * time.Minute
|
||||
|
|
|
|||