Merge branch 'master' into delete-volume-on-scale-down
commit 5fa3885519
@@ -74,6 +74,28 @@ spec:
            - teamId
            - postgresql
          properties:
+            additionalVolumes:
+              type: array
+              items:
+                type: object
+                required:
+                  - name
+                  - mountPath
+                  - volumeSource
+                properties:
+                  name:
+                    type: string
+                  mountPath:
+                    type: string
+                  targetContainers:
+                    type: array
+                    nullable: true
+                    items:
+                      type: string
+                  volumeSource:
+                    type: object
+                  subPath:
+                    type: string
            allowedSourceRanges:
              type: array
              nullable: true
@@ -279,11 +279,11 @@ configConnectionPooler:
  # docker image
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
  # max db connections the pooler should hold
-  connection_pooler_max_db_connections: 60
+  connection_pooler_max_db_connections: "60"
  # default pooling mode
  connection_pooler_mode: "transaction"
  # number of pooler instances
-  connection_pooler_number_of_instances: 2
+  connection_pooler_number_of_instances: "2"
  # default resources
  connection_pooler_default_cpu_request: 500m
  connection_pooler_default_memory_request: 100Mi
@@ -296,6 +296,7 @@ rbac:

crd:
  # Specifies whether custom resource definitions should be created
+  # When using helm3, this is ignored; instead use "--skip-crds" to skip.
  create: true

serviceAccount:
@@ -256,11 +256,11 @@ configConnectionPooler:
  # docker image
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
  # max db connections the pooler should hold
-  connection_pooler_max_db_connections: 60
+  connection_pooler_max_db_connections: "60"
  # default pooling mode
  connection_pooler_mode: "transaction"
  # number of pooler instances
-  connection_pooler_number_of_instances: 2
+  connection_pooler_number_of_instances: "2"
  # default resources
  connection_pooler_default_cpu_request: 500m
  connection_pooler_default_memory_request: 100Mi
@@ -273,6 +273,7 @@ rbac:

crd:
  # Specifies whether custom resource definitions should be created
+  # When using helm3, this is ignored; instead use "--skip-crds" to skip.
  create: true

serviceAccount:
@@ -154,6 +154,18 @@ These parameters are grouped directly under the `spec` key in the manifest.
  [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
  into account. Optional. Default is: "30 00 \* \* \*"

+* **additionalVolumes**
+  List of additional volumes to mount in each container of the statefulset pod.
+  Each item must contain a `name`, `mountPath`, and `volumeSource` which is a
+  [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
+  It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets
+  inside the StatefulSet. An `emptyDir` volume can also be shared between the
+  initContainer and the statefulSet. Additionally, you can provide a `subPath`
+  for the volume mount (for example, a single file in a configMap source volume).
+  The `targetContainers` array option specifies in which containers the
+  additional volumes are mounted. If `targetContainers` is empty, the volumes
+  are mounted only in the `postgres` container. The special item `all` mounts
+  them in all containers (postgres + sidecars); otherwise, list the target
+  containers explicitly (e.g. postgres, telegraf). A minimal entry is sketched
+  below.
+
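As a minimal sketch of such an entry (the container and ConfigMap names are illustrative, not defaults of the operator):

```yaml
  additionalVolumes:
    - name: metrics-config
      mountPath: /etc/metrics
      subPath: metrics.conf        # mount a single file from the ConfigMap
      targetContainers:
        - metrics-sidecar          # omit for the postgres container only, or use "all"
      volumeSource:
        configMap:
          name: metrics-config-map
```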
## Postgres parameters

Those parameters are grouped under the `postgresql` top-level key, which is
@@ -419,5 +431,9 @@ Those parameters are grouped under the `tls` top-level key.
  Filename of the private key. Defaults to "tls.key".

* **caFile**
-  Optional filename to the CA certificate. Useful when the client connects
-  with `sslmode=verify-ca` or `sslmode=verify-full`. Default is empty.
+  Optional filename to the CA certificate (e.g. "ca.crt"). Useful when the
+  client connects with `sslmode=verify-ca` or `sslmode=verify-full`.
+  Default is empty.
+
+  Optionally, one can provide a full path for any of them. By default it is
+  relative to "/tls/", which is the mount path of the TLS secret (see the
+  sketch below).
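Put together, a `tls` section that enables CA verification might look as follows; the secret name is an assumption for illustration, and the secret must contain the three files:

```yaml
tls:
  secretName: "postgres-tls"   # illustrative; holds tls.crt, tls.key and ca.crt
  certificateFile: "tls.crt"
  privateKeyFile: "tls.key"
  caFile: "ca.crt"             # enables clients with sslmode=verify-ca / verify-full
```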
@@ -584,7 +584,8 @@ don't know the value, use `103` which is the GID from the default spilo image
OpenShift allocates the users and groups dynamically (based on scc), and their
range is different in every namespace. Due to this dynamic behaviour, it's not
trivial to know at deploy time the uid/gid of the user in the cluster.
-This way, in OpenShift, you may want to skip the spilo_fsgroup setting.
+Therefore, instead of using a global `spilo_fsgroup` setting, use the
+`spiloFSGroup` field per Postgres cluster, as sketched below.
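For illustration, the per-cluster field sits at the top level of `spec` (the cluster name is arbitrary; 103 is the GID from the default spilo image mentioned above):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster   # illustrative name
spec:
  spiloFSGroup: 103
```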

Upload the cert as a kubernetes secret:
```sh
@@ -12,6 +12,29 @@ spec:
  volume:
    size: 1Gi
#    storageClass: my-sc
+  additionalVolumes:
+    - name: data
+      mountPath: /home/postgres/pgdata/partitions
+      targetContainers:
+        - postgres
+      volumeSource:
+        persistentVolumeClaim:
+          claimName: pvc-postgresql-data-partitions
+          readOnly: false
+    - name: conf
+      mountPath: /etc/telegraf
+      subPath: telegraf.conf
+      targetContainers:
+        - telegraf-sidecar
+      volumeSource:
+        configMap:
+          name: my-config-map
+    - name: empty
+      mountPath: /opt/empty
+      targetContainers:
+        - all
+      volumeSource:
+        emptyDir: {}
  numberOfInstances: 2
  users:  # Application/Robot users
    zalando:
@@ -125,5 +148,8 @@ spec:
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
    caFile: ""  # optionally configure Postgres with a CA certificate
# File names can also be defined with an absolute path, and will then no longer
# be relative to the "/tls/" path where the secret is mounted by default.
+# When TLS is enabled, also set the spiloFSGroup parameter above to the relevant
+# value; if unknown, set it to 103, which is the usual value in the default
+# spilo images. In OpenShift, there is no need to set spiloFSGroup/spilo_fsgroup.
@@ -38,6 +38,28 @@ spec:
            - teamId
            - postgresql
          properties:
+            additionalVolumes:
+              type: array
+              items:
+                type: object
+                required:
+                  - name
+                  - mountPath
+                  - volumeSource
+                properties:
+                  name:
+                    type: string
+                  mountPath:
+                    type: string
+                  targetContainers:
+                    type: array
+                    nullable: true
+                    items:
+                      type: string
+                  volumeSource:
+                    type: object
+                  subPath:
+                    type: string
            allowedSourceRanges:
              type: array
              nullable: true
@@ -682,6 +682,37 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
							},
						},
					},
+					"additionalVolumes": {
+						Type: "array",
+						Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1beta1.JSONSchemaProps{
+								Type:     "object",
+								Required: []string{"name", "mountPath", "volumeSource"},
+								Properties: map[string]apiextv1beta1.JSONSchemaProps{
+									"name": {
+										Type: "string",
+									},
+									"mountPath": {
+										Type: "string",
+									},
+									"targetContainers": {
+										Type: "array",
+										Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+											Schema: &apiextv1beta1.JSONSchemaProps{
+												Type: "string",
+											},
+										},
+									},
+									"volumeSource": {
+										Type: "object",
+									},
+									"subPath": {
+										Type: "string",
+									},
+								},
+							},
+						},
+					},
				},
			},
			"status": {
@@ -67,6 +67,7 @@ type PostgresSpec struct {
	PodAnnotations        map[string]string    `json:"podAnnotations"`
	ServiceAnnotations    map[string]string    `json:"serviceAnnotations"`
	TLS                   *TLSDescription      `json:"tls"`
+	AdditionalVolumes     []AdditionalVolume   `json:"additionalVolumes,omitempty"`

	// deprecated json tags
	InitContainersOld       []v1.Container `json:"init_containers,omitempty"`
@@ -98,6 +99,14 @@ type Volume struct {
	SubPath      string `json:"subPath,omitempty"`
}

+type AdditionalVolume struct {
+	Name             string          `json:"name"`
+	MountPath        string          `json:"mountPath"`
+	SubPath          string          `json:"subPath"`
+	TargetContainers []string        `json:"targetContainers"`
+	VolumeSource     v1.VolumeSource `json:"volumeSource"`
+}

// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
type PostgresqlParam struct {
	PgVersion  string            `json:"version"`
@@ -877,9 +877,20 @@ func (c *Cluster) initSystemUsers() {
			c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
		}

-		username := util.Coalesce(
-			c.Spec.ConnectionPooler.User,
-			c.OpConfig.ConnectionPooler.User)
+		// Using the superuser as the pooler user is not a good idea. First of
+		// all, it's not going to be synced correctly with the current
+		// implementation, and second, it's bad practice.
+		username := c.OpConfig.ConnectionPooler.User
+
+		isSuperUser := c.Spec.ConnectionPooler.User == c.OpConfig.SuperUsername
+		isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
+			c.Spec.ConnectionPooler.User, "connection pool role")
+
+		if !isSuperUser && !isProtectedUser {
+			username = util.Coalesce(
+				c.Spec.ConnectionPooler.User,
+				c.OpConfig.ConnectionPooler.User)
+		}

		// connection pooler application should be able to login with this role
		connectionPoolerUser := spec.PgUser{
@@ -721,4 +721,38 @@ func TestInitSystemUsers(t *testing.T) {
	if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
		t.Errorf("%s, connection pooler user is not present", testName)
	}

+	// superuser is not allowed as connection pool user
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "postgres",
+	}
+	cl.OpConfig.SuperUsername = "postgres"
+	cl.OpConfig.ConnectionPooler.User = "pooler"
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, the superuser is not allowed to be a connection pool user", testName)
+	}
+
+	// neither are protected users
+	delete(cl.pgUsers, "pooler")
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "admin",
+	}
+	cl.OpConfig.ProtectedRoles = []string{"admin"}
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, protected users are not allowed to be connection pool users", testName)
+	}
+
+	delete(cl.pgUsers, "pooler")
+	cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
+		User: "standby",
+	}
+
+	cl.initSystemUsers()
+	if _, exist := cl.pgUsers["pooler"]; !exist {
+		t.Errorf("%s, system users are not allowed to be connection pool users", testName)
+	}
}
@@ -500,7 +500,7 @@ func mountShmVolumeNeeded(opConfig config.Config, spec *acidv1.PostgresSpec) *bo
	return opConfig.ShmVolume
}

-func generatePodTemplate(
+func (c *Cluster) generatePodTemplate(
	namespace string,
	labels labels.Set,
	annotations map[string]string,
@@ -520,6 +520,7 @@ func generatePodTemplate(
	additionalSecretMount string,
	additionalSecretMountPath string,
	volumes []v1.Volume,
+	additionalVolumes []acidv1.AdditionalVolume,
) (*v1.PodTemplateSpec, error) {

	terminateGracePeriodSeconds := terminateGracePeriod
@@ -559,6 +560,10 @@
		addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath)
	}

+	if additionalVolumes != nil {
+		c.addAdditionalVolumes(&podSpec, additionalVolumes)
+	}

	template := v1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels:      labels,
@@ -1084,7 +1089,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	annotations := c.generatePodAnnotations(spec)

	// generate pod template for the statefulset, based on the spilo container and sidecars
-	podTemplate, err = generatePodTemplate(
+	podTemplate, err = c.generatePodTemplate(
		c.Namespace,
		c.labelsSet(true),
		annotations,
@@ -1104,7 +1109,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath,
		volumes,
-	)
+		spec.AdditionalVolumes)

	if err != nil {
		return nil, fmt.Errorf("could not generate pod template: %v", err)
	}
@@ -1298,6 +1304,69 @@ func addSecretVolume(podSpec *v1.PodSpec, additionalSecretMount string, addition
	podSpec.Volumes = volumes
}

+func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
+	additionalVolumes []acidv1.AdditionalVolume) {
+
+	volumes := podSpec.Volumes
+	mountPaths := map[string]acidv1.AdditionalVolume{}
+	// Collect only the volumes that pass validation, so that a skipped
+	// volume does not get a volume mount below.
+	validVolumes := []acidv1.AdditionalVolume{}
+	for i, v := range additionalVolumes {
+		if previousVolume, exist := mountPaths[v.MountPath]; exist {
+			msg := "Volume %+v cannot be mounted to the same path as %+v"
+			c.logger.Warningf(msg, v, previousVolume)
+			continue
+		}
+
+		if v.MountPath == constants.PostgresDataMount {
+			msg := "Cannot mount volume on postgresql data directory, %+v"
+			c.logger.Warningf(msg, v)
+			continue
+		}
+
+		// An empty target list defaults to the spilo container.
+		if v.TargetContainers == nil {
+			spiloContainer := podSpec.Containers[0]
+			additionalVolumes[i].TargetContainers = []string{spiloContainer.Name}
+		}
+
+		// "all" must be the only entry; mixing it with explicit container
+		// names is ambiguous, so skip such a volume entirely.
+		mixedTargets := false
+		for _, target := range v.TargetContainers {
+			if target == "all" && len(v.TargetContainers) != 1 {
+				mixedTargets = true
+			}
+		}
+		if mixedTargets {
+			msg := `Target containers could be either "all" or a list of containers, mixing those is not allowed, %+v`
+			c.logger.Warningf(msg, v)
+			continue
+		}
+
+		volumes = append(volumes,
+			v1.Volume{
+				Name:         v.Name,
+				VolumeSource: v.VolumeSource,
+			},
+		)
+
+		mountPaths[v.MountPath] = v
+		validVolumes = append(validVolumes, additionalVolumes[i])
+	}
+
+	c.logger.Infof("Mount additional volumes: %+v", validVolumes)
+
+	for i := range podSpec.Containers {
+		mounts := podSpec.Containers[i].VolumeMounts
+		for _, v := range validVolumes {
+			for _, target := range v.TargetContainers {
+				if podSpec.Containers[i].Name == target || target == "all" {
+					mounts = append(mounts, v1.VolumeMount{
+						Name:      v.Name,
+						MountPath: v.MountPath,
+						SubPath:   v.SubPath,
+					})
+				}
+			}
+		}
+		podSpec.Containers[i].VolumeMounts = mounts
+	}
+
+	podSpec.Volumes = volumes
+}

func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {

	var storageClassName *string
@@ -1702,7 +1771,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
	annotations := c.generatePodAnnotations(&c.Spec)

	// re-use the method that generates DB pod templates
-	if podTemplate, err = generatePodTemplate(
+	if podTemplate, err = c.generatePodTemplate(
		c.Namespace,
		labels,
		annotations,
@@ -1721,8 +1790,9 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
		"",
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath,
-		nil); err != nil {
-		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
+		nil,
+		[]acidv1.AdditionalVolume{}); err != nil {
+		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
	}

	// overwrite specific params of logical backups pods
@@ -1021,3 +1021,172 @@ func TestTLS(t *testing.T) {
	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
}
+
+func TestAdditionalVolume(t *testing.T) {
+	testName := "TestAdditionalVolume"
+	tests := []struct {
+		subTest   string
+		podSpec   *v1.PodSpec
+		volumePos int
+	}{
+		{
+			subTest: "empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{},
+				Containers: []v1.Container{
+					{
+						VolumeMounts: []v1.VolumeMount{},
+					},
+				},
+			},
+			volumePos: 0,
+		},
+		{
+			subTest: "non-empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+		{
+			subTest: "non-empty PodSpec with sidecar",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+					{
+						Name: "sidecar",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
+
+	for _, tt := range tests {
+		// Test with additional volume mounted in all containers
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"all"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for i := range tt.podSpec.Containers {
+			volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.volumePos].Name
+
+			if volumeMountName != additionalVolumeMount[0].Name {
+				t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+					testName, tt.subTest, additionalVolumeMount, volumeMountName)
+			}
+		}
+
+		numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		if numMountsCheck != numMounts+1 {
+			t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+				numMountsCheck, numMounts+1)
+		}
+	}
+
+	for _, tt := range tests {
+		// Test with additional volume mounted only in the first container
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"postgres"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for _, container := range tt.podSpec.Containers {
+			if container.Name == "postgres" {
+				volumeMountName := container.VolumeMounts[tt.volumePos].Name
+
+				if volumeMountName != additionalVolumeMount[0].Name {
+					t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+						testName, tt.subTest, additionalVolumeMount, volumeMountName)
+				}
+
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck != numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts+1)
+				}
+			} else {
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck == numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts)
+				}
+			}
+		}
+	}
+}
@@ -105,7 +105,12 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo
	var msg string
	c.setProcessName("creating connection pooler")

+	if c.ConnectionPooler == nil {
+		c.ConnectionPooler = &ConnectionPoolerObjects{}
+	}
+
	schema := c.Spec.ConnectionPooler.Schema

	if schema == "" {
		schema = c.OpConfig.ConnectionPooler.Schema
	}
@@ -169,6 +169,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
		fromCRD.ConnectionPooler.User,
		constants.ConnectionPoolerUserName)

+	if result.ConnectionPooler.User == result.SuperUsername {
+		msg := "connection pooler user is not allowed to be the same as the superuser, username: %s"
+		panic(fmt.Errorf(msg, result.ConnectionPooler.User))
+	}
+
	result.ConnectionPooler.Image = util.Coalesce(
		fromCRD.ConnectionPooler.Image,
		"registry.opensource.zalan.do/acid/pgbouncer")
@@ -219,5 +219,11 @@ func validate(cfg *Config) (err error) {
		msg := "number of connection pooler instances should be higher than %d"
		err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances)
	}

+	if cfg.ConnectionPooler.User == cfg.SuperUsername {
+		msg := "connection pooler user is not allowed to be the same as the superuser, username: %s"
+		err = fmt.Errorf(msg, cfg.ConnectionPooler.User)
+	}
+
	return
}