From 7232326159a9f77766260bc3e7b49f79dd101bd8 Mon Sep 17 00:00:00 2001
From: ReSearchITEng
Date: Thu, 9 Apr 2020 10:16:45 +0300
Subject: [PATCH 1/3] Fix val docs (#901)

* missing quotes in pooler configmap in values.yaml

* missing quotes in pooler configmap in values-crd.yaml

* docs clarifications

* helm3 --skip-crds

* Update docs/user.md

Co-Authored-By: Felix Kunde

* details moved into docs

Co-authored-by: Felix Kunde
---
 charts/postgres-operator/values-crd.yaml  | 5 +++--
 charts/postgres-operator/values.yaml      | 5 +++--
 docs/reference/cluster_manifest.md        | 8 ++++++--
 docs/user.md                              | 3 ++-
 manifests/complete-postgres-manifest.yaml | 3 +++
 5 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml
index 37ad6e45e..caa4dda4d 100644
--- a/charts/postgres-operator/values-crd.yaml
+++ b/charts/postgres-operator/values-crd.yaml
@@ -277,11 +277,11 @@ configConnectionPooler:
   # docker image
   connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pooler_max_db_connections: 60
+  connection_pooler_max_db_connections: "60"
   # default pooling mode
   connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pooler_number_of_instances: 2
+  connection_pooler_number_of_instances: "2"
   # default resources
   connection_pooler_default_cpu_request: 500m
   connection_pooler_default_memory_request: 100Mi
@@ -294,6 +294,7 @@ rbac:

 crd:
   # Specifies whether custom resource definitions should be created
+  # When using helm3, this is ignored; use "--skip-crds" instead.
   create: true

 serviceAccount:
diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml
index 3a701bb64..e7db249f0 100644
--- a/charts/postgres-operator/values.yaml
+++ b/charts/postgres-operator/values.yaml
@@ -254,11 +254,11 @@ configConnectionPooler:
   # docker image
   connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pooler_max_db_connections: 60
+  connection_pooler_max_db_connections: "60"
   # default pooling mode
   connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pooler_number_of_instances: 2
+  connection_pooler_number_of_instances: "2"
   # default resources
   connection_pooler_default_cpu_request: 500m
   connection_pooler_default_memory_request: 100Mi
@@ -271,6 +271,7 @@ rbac:

 crd:
   # Specifies whether custom resource definitions should be created
+  # When using helm3, this is ignored; use "--skip-crds" instead.
   create: true

 serviceAccount:
diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index 83dedabd3..0cbcdc08e 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -419,5 +419,9 @@ Those parameters are grouped under the `tls` top-level key.
   Filename of the private key. Defaults to "tls.key".

 * **caFile**
-  Optional filename to the CA certificate. Useful when the client connects
-  with `sslmode=verify-ca` or `sslmode=verify-full`. Default is empty.
+  Optional filename of the CA certificate (e.g. "ca.crt"). Useful when the
+  client connects with `sslmode=verify-ca` or `sslmode=verify-full`.
+  Default is empty.
+
+  Optionally, a full path can be provided for any of these files. By default,
+  a file name is relative to "/tls/", the mount path of the TLS secret.
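To make the path handling described above concrete, here is a minimal sketch of a `tls` section; the secret name and the CA location are illustrative assumptions, not values taken from this patch:

```yaml
spec:
  tls:
    secretName: "pg-tls"        # hypothetical secret holding tls.crt, tls.key and ca.crt
    certificateFile: "tls.crt"  # relative name, resolved as /tls/tls.crt
    privateKeyFile: "tls.key"   # relative name, resolved as /tls/tls.key
    caFile: "/mnt/ca/ca.crt"    # full path, used as-is instead of /tls/ca.crt
```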
diff --git a/docs/user.md b/docs/user.md
index 1be50a01a..bb12fd2e1 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -584,7 +584,8 @@ don't know the value, use `103` which is the GID from the default spilo image
 OpenShift allocates the users and groups dynamically (based on scc), and their
 range is different in every namespace. Due to this dynamic behaviour, it's not
 trivial to know at deploy time the uid/gid of the user in the cluster.
-This way, in OpenShift, you may want to skip the spilo_fsgroup setting.
+Therefore, instead of using a global `spilo_fsgroup` setting, use the
+`spiloFSGroup` field per Postgres cluster.

 Upload the cert as a kubernetes secret:
 ```sh
diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml
index 3a87254cf..6de6db11e 100644
--- a/manifests/complete-postgres-manifest.yaml
+++ b/manifests/complete-postgres-manifest.yaml
@@ -125,5 +125,8 @@ spec:
     certificateFile: "tls.crt"
     privateKeyFile: "tls.key"
     caFile: ""  # optionally configure Postgres with a CA certificate
+# file names can also be defined with an absolute path, and will then no longer
+# be relative to the "/tls/" path where the secret is mounted by default
 # When TLS is enabled, also set spiloFSGroup parameter above to the relevant value.
 # if unknown, set it to 103 which is the usual value in the default spilo images.
+# In OpenShift, there is no need to set spiloFSGroup/spilo_fsgroup.
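As a sketch of the per-cluster setting recommended in the docs change above (the cluster name is hypothetical; `103` is the GID of the default spilo image mentioned in the manifest comments):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster  # hypothetical name
spec:
  # per-cluster alternative to the global spilo_fsgroup operator option
  spiloFSGroup: 103
```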
From a1f2bd05b978b4dac384eb14a06a39f590cf5f57 Mon Sep 17 00:00:00 2001
From: Dmitry Dolgov <9erthalion6@gmail.com>
Date: Thu, 9 Apr 2020 09:21:45 +0200
Subject: [PATCH 2/3] Prevent superuser from being a connection pool user (#906)

* Protected and system users can't be a connection pool user

It's not supported, nor is it a best practice. Also fix a potential null
pointer access.

For protected users it makes sense by the very intent of protecting these
users (e.g. from being overridden or used for something other than intended).
For system users the reason is the same as for the superuser: it concerns the
replication user, which is under Patroni control.

This is implemented on both levels, operator config and postgresql manifest.
For the latter we just use the default name in this case, assuming that the
operator config is always correct. For the former, since it's a serious
misconfiguration, the operator panics.
---
 pkg/cluster/cluster.go            | 17 +++++++++++++---
 pkg/cluster/cluster_test.go       | 34 ++++++++++++++++++++++++++++++
 pkg/cluster/resources.go          |  5 +++++
 pkg/controller/operator_config.go |  5 +++++
 pkg/util/config/config.go         |  6 ++++++
 5 files changed, 64 insertions(+), 3 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index cde2dc260..51dd368fb 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -877,9 +877,20 @@ func (c *Cluster) initSystemUsers() {
 		c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
 	}

-	username := util.Coalesce(
-		c.Spec.ConnectionPooler.User,
-		c.OpConfig.ConnectionPooler.User)
+	// Using the superuser as the pooler user is not a good idea. First, it's
+	// not going to be synced correctly with the current implementation;
+	// second, it's bad practice.
+	username := c.OpConfig.ConnectionPooler.User
+
+	isSuperUser := c.Spec.ConnectionPooler.User == c.OpConfig.SuperUsername
+	isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
+		c.Spec.ConnectionPooler.User, "connection pool role")
+
+	if !isSuperUser && !isProtectedUser {
+		username = util.Coalesce(
+			c.Spec.ConnectionPooler.User,
+			c.OpConfig.ConnectionPooler.User)
+	}

 	// connection pooler application should be able to login with this role
 	connectionPoolerUser := spec.PgUser{
diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go
index af3092ad5..432f53132 100644
--- a/pkg/cluster/cluster_test.go
+++ b/pkg/cluster/cluster_test.go
@@ -721,4 +721,38 @@ func TestInitSystemUsers(t *testing.T) {
 	if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
 		t.Errorf("%s, connection pooler user is not present", testName)
 	}
+
+	// superuser is not allowed as connection pool user
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "postgres",
+	}
+	cl.OpConfig.SuperUsername = "postgres"
+	cl.OpConfig.ConnectionPooler.User = "pooler"
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, Superuser is not allowed to be a connection pool user", testName)
+	}
+
+	// neither are protected users
+	delete(cl.pgUsers, "pooler")
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "admin",
+	}
+	cl.OpConfig.ProtectedRoles = []string{"admin"}
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, Protected users are not allowed to be connection pool users", testName)
+	}
+
+	delete(cl.pgUsers, "pooler")
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "standby",
+	}
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, System users are not allowed to be connection pool users", testName)
+	}
 }
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 4c341aefe..b38458af8 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -105,7 +105,12 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) {
 	var msg string
 	c.setProcessName("creating connection pooler")

+	if c.ConnectionPooler == nil {
+		c.ConnectionPooler = &ConnectionPoolerObjects{}
+	}
+
 	schema := c.Spec.ConnectionPooler.Schema
+
 	if schema == "" {
 		schema = c.OpConfig.ConnectionPooler.Schema
 	}
diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go
index a9c36f995..07be90f22 100644
--- a/pkg/controller/operator_config.go
+++ b/pkg/controller/operator_config.go
@@ -169,6 +169,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config {
 		fromCRD.ConnectionPooler.User,
 		constants.ConnectionPoolerUserName)

+	if result.ConnectionPooler.User == result.SuperUsername {
+		msg := "Connection pool user is not allowed to be the same as super user, username: %s"
+		panic(fmt.Errorf(msg, result.ConnectionPooler.User))
+	}
+
 	result.ConnectionPooler.Image = util.Coalesce(
 		fromCRD.ConnectionPooler.Image,
 		"registry.opensource.zalan.do/acid/pgbouncer")
diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go
index 0a7bac835..84a62c0fd 100644
--- a/pkg/util/config/config.go
+++ b/pkg/util/config/config.go
@@ -218,5 +218,11 @@ func validate(cfg *Config) (err error) {
 		msg := "number of connection pooler instances should be higher than %d"
 		err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances)
 	}
+
+	if cfg.ConnectionPooler.User == cfg.SuperUsername {
+		msg := "Connection pool user is not allowed to be the same as super user, username: %s"
+		err = fmt.Errorf(msg, cfg.ConnectionPooler.User)
+	}
+
 	return
 }
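To sketch the resulting behaviour on the manifest level (assuming the `connectionPooler.user` key of the cluster manifest; the user name is illustrative):

```yaml
spec:
  connectionPooler:
    # If this equals the superuser or a protected/system role, the operator
    # ignores it and uses the pooler user from the operator configuration;
    # the same clash inside the operator configuration causes a panic.
    user: "pooler"
```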
From ea3eef45d95c62ff1f4a4c825744835031a7b54f Mon Sep 17 00:00:00 2001
From: Thierry Sallé
Date: Wed, 15 Apr 2020 09:13:35 +0200
Subject: [PATCH 3/3] Additional volumes capability (#736)

* Allow additional volumes to be mounted

* added TargetContainers option to determine whether an additional volume
  needs to be mounted or not

* fixed dependencies

* updated manifest additional volume example

* More validation

Check that there are no volume mount path clashes or "all" vs ["a", "b"]
mixtures. Also change the default behaviour to mount to the "postgres"
container.

* More documentation / example about additional volumes

* Revert go.sum and go.mod from origin/master

* Declare additionalVolume specs in CRDs

* fixed k8sres after rebase

* resolved conflict

Co-authored-by: Dmitrii Dolgov <9erthalion6@gmail.com>
Co-authored-by: Thierry
---
 .../postgres-operator/crds/postgresqls.yaml  |  22 +++
 docs/reference/cluster_manifest.md           |  12 ++
 manifests/complete-postgres-manifest.yaml    |  23 +++
 manifests/postgresql.crd.yaml                |  22 +++
 pkg/apis/acid.zalan.do/v1/crds.go            |  31 ++++
 pkg/apis/acid.zalan.do/v1/postgresql_type.go |   9 +
 pkg/cluster/k8sres.go                        |  82 ++++++++-
 pkg/cluster/k8sres_test.go                   | 169 ++++++++++++++++++
 8 files changed, 364 insertions(+), 6 deletions(-)

diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml
index f64080ed5..3c666b9ab 100644
--- a/charts/postgres-operator/crds/postgresqls.yaml
+++ b/charts/postgres-operator/crds/postgresqls.yaml
@@ -74,6 +74,28 @@ spec:
             - teamId
             - postgresql
           properties:
+            additionalVolumes:
+              type: array
+              items:
+                type: object
+                required:
+                  - name
+                  - mountPath
+                  - volumeSource
+                properties:
+                  name:
+                    type: string
+                  mountPath:
+                    type: string
+                  targetContainers:
+                    type: array
+                    nullable: true
+                    items:
+                      type: string
+                  volumeSource:
+                    type: object
+                  subPath:
+                    type: string
             allowedSourceRanges:
               type: array
               nullable: true
diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index 0cbcdc08e..361e32780 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -154,6 +154,18 @@ These parameters are grouped directly under the `spec` key in the manifest.
   [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
   into account. Optional. Default is: "30 00 \* \* \*"

+* **additionalVolumes**
+  List of additional volumes to mount in each container of the statefulset pod.
+  Each item must contain a `name`, `mountPath`, and `volumeSource` which is a
+  [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
+  It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet.
+  An `emptyDir` volume can also be shared between an init container and the main containers.
+  Additionally, you can provide a `subPath` for the volume mount (a file in a configMap source volume, for example).
+  You can also specify in which containers the additional volumes will be mounted with the `targetContainers` array option.
+  If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container.
+  If you set the special item `all`, they will be mounted in all containers (postgres + sidecars).
+  Otherwise, you can set a list of target containers in which the additional volumes will be mounted (e.g. postgres, telegraf).
+
 ## Postgres parameters

 Those parameters are grouped under the `postgresql` top-level key, which is
diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml
index 6de6db11e..23b0b6096 100644
--- a/manifests/complete-postgres-manifest.yaml
+++ b/manifests/complete-postgres-manifest.yaml
@@ -12,6 +12,29 @@ spec:
   volume:
     size: 1Gi
 #    storageClass: my-sc
+  additionalVolumes:
+    - name: data
+      mountPath: /home/postgres/pgdata/partitions
+      targetContainers:
+        - postgres
+      volumeSource:
+        persistentVolumeClaim:
+          claimName: pvc-postgresql-data-partitions
+          readOnly: false
+    - name: conf
+      mountPath: /etc/telegraf
+      subPath: telegraf.conf
+      targetContainers:
+        - telegraf-sidecar
+      volumeSource:
+        configMap:
+          name: my-config-map
+    - name: empty
+      mountPath: /opt/empty
+      targetContainers:
+        - all
+      volumeSource:
+        emptyDir: {}
   numberOfInstances: 2
   users:  # Application/Robot users
     zalando:
diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml
index f8631f0b7..c9d60d60a 100644
--- a/manifests/postgresql.crd.yaml
+++ b/manifests/postgresql.crd.yaml
@@ -38,6 +38,28 @@ spec:
             - teamId
             - postgresql
           properties:
+            additionalVolumes:
+              type: array
+              items:
+                type: object
+                required:
+                  - name
+                  - mountPath
+                  - volumeSource
+                properties:
+                  name:
+                    type: string
+                  mountPath:
+                    type: string
+                  targetContainers:
+                    type: array
+                    nullable: true
+                    items:
+                      type: string
+                  volumeSource:
+                    type: object
+                  subPath:
+                    type: string
             allowedSourceRanges:
               type: array
               nullable: true
diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go
index 60dd66ba8..d693d0e15 100644
--- a/pkg/apis/acid.zalan.do/v1/crds.go
+++ b/pkg/apis/acid.zalan.do/v1/crds.go
@@ -682,6 +682,37 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 						},
 					},
 				},
+				"additionalVolumes": {
+					Type: "array",
+					Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+						Schema: &apiextv1beta1.JSONSchemaProps{
+							Type:     "object",
+							Required: []string{"name", "mountPath", "volumeSource"},
+							Properties: map[string]apiextv1beta1.JSONSchemaProps{
+								"name": {
+									Type: "string",
+								},
+								"mountPath": {
+									Type: "string",
+								},
+								"targetContainers": {
+									Type: "array",
+									Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+										Schema: &apiextv1beta1.JSONSchemaProps{
+											Type: "string",
+										},
+									},
+								},
+								"volumeSource": {
+									Type: "object",
+								},
+								"subPath": {
+									Type: "string",
+								},
+							},
+						},
+					},
+				},
 			},
 		},
 		"status": {
diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
index b1b6a36a6..04b70cba8 100644
--- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go
+++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
@@ -67,6 +67,7 @@ type PostgresSpec struct {
 	PodAnnotations     map[string]string `json:"podAnnotations"`
 	ServiceAnnotations map[string]string `json:"serviceAnnotations"`
 	TLS                *TLSDescription   `json:"tls"`
+	AdditionalVolumes  []AdditionalVolume `json:"additionalVolumes,omitempty"`

 	// deprecated json tags
 	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
@@ -98,6 +99,14 @@ type Volume struct {
 	SubPath string `json:"subPath,omitempty"`
 }

+type AdditionalVolume struct {
+	Name             string          `json:"name"`
+	MountPath        string          `json:"mountPath"`
+	SubPath          string          `json:"subPath"`
+	TargetContainers []string        `json:"targetContainers"`
+	VolumeSource     v1.VolumeSource `json:"volumeSource"`
+}
+
 // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
 type PostgresqlParam struct {
 	PgVersion string `json:"version"`
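For reference, a single `additionalVolumes` item as it maps onto the `AdditionalVolume` struct above (values borrowed from the example manifest earlier in this patch):

```yaml
additionalVolumes:
  - name: conf                # Name
    mountPath: /etc/telegraf  # MountPath
    subPath: telegraf.conf    # SubPath, optional
    targetContainers:         # TargetContainers
      - telegraf-sidecar
    volumeSource:             # VolumeSource, a core/v1 VolumeSource
      configMap:
        name: my-config-map
```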
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 8de61bdea..e6f53af9d 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -500,7 +500,7 @@ func mountShmVolumeNeeded(opConfig config.Config, spec *acidv1.PostgresSpec) *bool {
 	return opConfig.ShmVolume
 }

-func generatePodTemplate(
+func (c *Cluster) generatePodTemplate(
 	namespace string,
 	labels labels.Set,
 	annotations map[string]string,
@@ -520,6 +520,7 @@ func generatePodTemplate(
 	additionalSecretMount string,
 	additionalSecretMountPath string,
 	volumes []v1.Volume,
+	additionalVolumes []acidv1.AdditionalVolume,
 ) (*v1.PodTemplateSpec, error) {

 	terminateGracePeriodSeconds := terminateGracePeriod
@@ -559,6 +560,10 @@ func generatePodTemplate(
 		addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath)
 	}

+	if additionalVolumes != nil {
+		c.addAdditionalVolumes(&podSpec, additionalVolumes)
+	}
+
 	template := v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels:      labels,
@@ -1084,7 +1089,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	annotations := c.generatePodAnnotations(spec)

 	// generate pod template for the statefulset, based on the spilo container and sidecars
-	podTemplate, err = generatePodTemplate(
+	podTemplate, err = c.generatePodTemplate(
 		c.Namespace,
 		c.labelsSet(true),
 		annotations,
@@ -1104,7 +1109,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 		c.OpConfig.AdditionalSecretMount,
 		c.OpConfig.AdditionalSecretMountPath,
 		volumes,
-	)
+		spec.AdditionalVolumes)
+
 	if err != nil {
 		return nil, fmt.Errorf("could not generate pod template: %v", err)
 	}
@@ -1298,6 +1304,69 @@ func addSecretVolume(podSpec *v1.PodSpec, additionalSecretMount string, additionalSecretMountPath string) {
 	podSpec.Volumes = volumes
 }

+func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
+	additionalVolumes []acidv1.AdditionalVolume) {
+
+	volumes := podSpec.Volumes
+	mountPaths := map[string]acidv1.AdditionalVolume{}
+	for i, v := range additionalVolumes {
+		if previousVolume, exist := mountPaths[v.MountPath]; exist {
+			msg := "Volume %+v cannot be mounted to the same path as %+v"
+			c.logger.Warningf(msg, v, previousVolume)
+			continue
+		}
+
+		if v.MountPath == constants.PostgresDataMount {
+			msg := "Cannot mount volume on postgresql data directory, %+v"
+			c.logger.Warningf(msg, v)
+			continue
+		}
+
+		if v.TargetContainers == nil {
+			spiloContainer := podSpec.Containers[0]
+			additionalVolumes[i].TargetContainers = []string{spiloContainer.Name}
+		}
+
+		for _, target := range v.TargetContainers {
+			if target == "all" && len(v.TargetContainers) != 1 {
+				msg := `Target containers could be either "all" or a list
+						of containers, mixing those is not allowed, %+v`
+				c.logger.Warningf(msg, v)
+				continue
+			}
+		}
+
+		volumes = append(volumes,
+			v1.Volume{
+				Name:         v.Name,
+				VolumeSource: v.VolumeSource,
+			},
+		)
+
+		mountPaths[v.MountPath] = v
+	}
+
+	c.logger.Infof("Mount additional volumes: %+v", additionalVolumes)
+
+	for i := range podSpec.Containers {
+		mounts := podSpec.Containers[i].VolumeMounts
+		for _, v := range additionalVolumes {
+			for _, target := range v.TargetContainers {
+				if podSpec.Containers[i].Name == target || target == "all" {
+					mounts = append(mounts, v1.VolumeMount{
+						Name:      v.Name,
+						MountPath: v.MountPath,
+						SubPath:   v.SubPath,
+					})
+				}
+			}
+		}
+		podSpec.Containers[i].VolumeMounts = mounts
+	}
+
+	podSpec.Volumes = volumes
+}
+
 func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {

 	var storageClassName *string
@@ -1702,7 +1771,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	annotations := c.generatePodAnnotations(&c.Spec)

 	// re-use the method that generates DB pod templates
-	if podTemplate, err = generatePodTemplate(
+	if podTemplate, err = c.generatePodTemplate(
 		c.Namespace,
 		labels,
 		annotations,
@@ -1721,8 +1790,9 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		"",
 		c.OpConfig.AdditionalSecretMount,
 		c.OpConfig.AdditionalSecretMountPath,
-		nil); err != nil {
-		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
+		nil,
+		[]acidv1.AdditionalVolume{}); err != nil {
+		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
 	}

 	// overwrite specific params of logical backups pods
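A short sketch of how the checks in `addAdditionalVolumes` treat different inputs (names and paths are illustrative):

```yaml
additionalVolumes:
  - name: first
    mountPath: /shared         # added: first claim on /shared wins
    volumeSource:
      emptyDir: {}
  - name: second
    mountPath: /shared         # not added: clashes with "first", a warning is logged
    volumeSource:
      emptyDir: {}
  - name: everywhere
    mountPath: /opt/everywhere
    targetContainers:
      - all                    # mounted into the postgres container and every sidecar
    volumeSource:
      emptyDir: {}
  - name: default-target
    mountPath: /opt/default    # no targetContainers: defaults to the first (postgres) container
    volumeSource:
      emptyDir: {}
```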
diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go
index 0930279d2..5b55e988c 100644
--- a/pkg/cluster/k8sres_test.go
+++ b/pkg/cluster/k8sres_test.go
@@ -1021,3 +1021,172 @@ func TestTLS(t *testing.T) {
 	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
 	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }
+
+func TestAdditionalVolume(t *testing.T) {
+	testName := "TestAdditionalVolume"
+	tests := []struct {
+		subTest   string
+		podSpec   *v1.PodSpec
+		volumePos int
+	}{
+		{
+			subTest: "empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{},
+				Containers: []v1.Container{
+					{
+						VolumeMounts: []v1.VolumeMount{},
+					},
+				},
+			},
+			volumePos: 0,
+		},
+		{
+			subTest: "non empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+		{
+			subTest: "non empty PodSpec with sidecar",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+					{
+						Name: "sidecar",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
+
+	for _, tt := range tests {
+		// Test with additional volume mounted in all containers
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"all"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for i := range tt.podSpec.Containers {
+			volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.volumePos].Name
+
+			if volumeMountName != additionalVolumeMount[0].Name {
+				t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+					testName, tt.subTest, additionalVolumeMount, volumeMountName)
+			}
+
+		}
+
+		numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		if numMountsCheck != numMounts+1 {
+			t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+				numMountsCheck, numMounts+1)
+		}
+	}
+
+	for _, tt := range tests {
+		// Test with additional volume mounted only in the postgres container
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"postgres"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for _, container := range tt.podSpec.Containers {
+			if container.Name == "postgres" {
+				volumeMountName := container.VolumeMounts[tt.volumePos].Name
+
+				if volumeMountName != additionalVolumeMount[0].Name {
+					t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+						testName, tt.subTest, additionalVolumeMount, volumeMountName)
+				}
+
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck != numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts+1)
+				}
+			} else {
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck == numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts)
+				}
+			}
+		}
+	}
+}