Additional volumes capability (#736)
* Allow additional volumes to be mounted
* Add a targetContainers option to determine whether an additional volume needs to be mounted in a given container
* Fix dependencies
* Update the additional volume example in the manifest
* More validation: check that there are no volume mount path clashes or "all" vs ["a", "b"] mixtures. Also change the default behaviour to mount into the "postgres" container.
* More documentation / examples for additional volumes
* Revert go.sum and go.mod from origin/master
* Declare additionalVolume specs in CRDs
* Fix k8sres after rebase
* Resolve conflicts

Co-authored-by: Dmitrii Dolgov <9erthalion6@gmail.com>
Co-authored-by: Thierry <thierry@malt.com>
This commit is contained in:
parent a1f2bd05b9
commit ea3eef45d9
@@ -74,6 +74,28 @@ spec:
         - teamId
         - postgresql
       properties:
+        additionalVolumes:
+          type: array
+          items:
+            type: object
+            required:
+              - name
+              - mountPath
+              - volumeSource
+            properties:
+              name:
+                type: string
+              mountPath:
+                type: string
+              targetContainers:
+                type: array
+                nullable: true
+                items:
+                  type: string
+              volumeSource:
+                type: object
+              subPath:
+                type: string
         allowedSourceRanges:
           type: array
           nullable: true
@@ -154,6 +154,18 @@ These parameters are grouped directly under the `spec` key in the manifest.
   [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
   into account. Optional. Default is: "30 00 \* \* \*"
 
+* **additionalVolumes**
+  List of additional volumes to mount in each container of the statefulset pod.
+  Each item must contain a `name`, a `mountPath`, and a `volumeSource`, which is a
+  [Kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
+  This allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets
+  inside the StatefulSet; an `emptyDir` volume can also be shared between an
+  initContainer and the StatefulSet. Additionally, you can provide a `subPath` for
+  the volume mount (a file in a configMap source volume, for example). The
+  `targetContainers` array option specifies the containers in which the additional
+  volumes are mounted. If it is empty, they are mounted only in the `postgres`
+  container; the special item `all` mounts them in all containers (postgres +
+  sidecars); otherwise, list the target containers explicitly (e.g. postgres,
+  telegraf). A sketch of these resolution rules follows this hunk.
+
 ## Postgres parameters
 
 Those parameters are grouped under the `postgresql` top-level key, which is
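For clarity, here is a minimal, hedged sketch of the `targetContainers` resolution rules documented above. The helper name and the error-returning style are illustrative only; the operator applies these rules inside `(*Cluster).addAdditionalVolumes` (added later in this commit) and logs warnings instead of returning errors.

```go
package main

import "fmt"

// resolveTargetContainers mirrors the documented rules: an empty list
// defaults to the "postgres" container, the special item "all" must
// stand alone, and any other list is taken verbatim.
func resolveTargetContainers(targets []string) ([]string, error) {
	if len(targets) == 0 {
		return []string{"postgres"}, nil
	}
	for _, t := range targets {
		if t == "all" && len(targets) != 1 {
			return nil, fmt.Errorf(`"all" cannot be mixed with named containers: %v`, targets)
		}
	}
	return targets, nil
}

func main() {
	fmt.Println(resolveTargetContainers(nil))                         // [postgres] <nil>
	fmt.Println(resolveTargetContainers([]string{"all"}))             // [all] <nil>
	fmt.Println(resolveTargetContainers([]string{"all", "telegraf"})) // error
}
```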
@@ -12,6 +12,29 @@ spec:
   volume:
     size: 1Gi
     # storageClass: my-sc
+  additionalVolumes:
+    - name: data
+      mountPath: /home/postgres/pgdata/partitions
+      targetContainers:
+        - postgres
+      volumeSource:
+        persistentVolumeClaim:
+          claimName: pvc-postgresql-data-partitions
+          readOnly: false
+    - name: conf
+      mountPath: /etc/telegraf
+      subPath: telegraf.conf
+      targetContainers:
+        - telegraf-sidecar
+      volumeSource:
+        configMap:
+          name: my-config-map
+    - name: empty
+      mountPath: /opt/empty
+      targetContainers:
+        - all
+      volumeSource:
+        emptyDir: {}
   numberOfInstances: 2
   users: # Application/Robot users
     zalando:
@@ -38,6 +38,28 @@ spec:
         - teamId
         - postgresql
       properties:
+        additionalVolumes:
+          type: array
+          items:
+            type: object
+            required:
+              - name
+              - mountPath
+              - volumeSource
+            properties:
+              name:
+                type: string
+              mountPath:
+                type: string
+              targetContainers:
+                type: array
+                nullable: true
+                items:
+                  type: string
+              volumeSource:
+                type: object
+              subPath:
+                type: string
         allowedSourceRanges:
           type: array
           nullable: true
@@ -682,6 +682,37 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 						},
 					},
 				},
+				"additionalVolumes": {
+					Type: "array",
+					Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+						Schema: &apiextv1beta1.JSONSchemaProps{
+							Type:     "object",
+							Required: []string{"name", "mountPath", "volumeSource"},
+							Properties: map[string]apiextv1beta1.JSONSchemaProps{
+								"name": {
+									Type: "string",
+								},
+								"mountPath": {
+									Type: "string",
+								},
+								"targetContainers": {
+									Type: "array",
+									Items: &apiextv1beta1.JSONSchemaPropsOrArray{
+										Schema: &apiextv1beta1.JSONSchemaProps{
+											Type: "string",
+										},
+									},
+								},
+								"volumeSource": {
+									Type: "object",
+								},
+								"subPath": {
+									Type: "string",
+								},
+							},
+						},
+					},
+				},
 				"status": {
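Note that the schema above types `volumeSource` only as a free-form "object"; the structural check happens when the manifest is deserialized into the core/v1 `VolumeSource` type. A minimal sketch of that round trip (plain `encoding/json` is used here purely for illustration; the operator relies on the Kubernetes client machinery):

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A volumeSource taken verbatim from the example manifest above.
	raw := []byte(`{"configMap": {"name": "my-config-map"}}`)

	var vs v1.VolumeSource
	if err := json.Unmarshal(raw, &vs); err != nil {
		panic(err)
	}
	fmt.Println(vs.ConfigMap.Name) // my-config-map

	// A misspelled key (e.g. "configMaps") would not error here; it would
	// simply leave every VolumeSource field nil, which is why the CRD keeps
	// the field as a generic object.
}
```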
@@ -67,6 +67,7 @@ type PostgresSpec struct {
 	PodAnnotations     map[string]string `json:"podAnnotations"`
 	ServiceAnnotations map[string]string `json:"serviceAnnotations"`
 	TLS                *TLSDescription   `json:"tls"`
+	AdditionalVolumes  []AdditionalVolume `json:"additionalVolumes,omitempty"`
 
 	// deprecated json tags
 	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
@@ -98,6 +99,14 @@ type Volume struct {
 	SubPath string `json:"subPath,omitempty"`
 }
 
+type AdditionalVolume struct {
+	Name             string          `json:"name"`
+	MountPath        string          `json:"mountPath"`
+	SubPath          string          `json:"subPath"`
+	TargetContainers []string        `json:"targetContainers"`
+	VolumeSource     v1.VolumeSource `json:"volumeSource"`
+}
+
 // PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
 type PostgresqlParam struct {
 	PgVersion string `json:"version"`
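As a hedged usage sketch, this is how the `conf` entry from the example manifest above would look once deserialized into the new type. The import path is assumed from the operator's repository layout and is not part of this diff.

```go
package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	// Mirrors the "conf" additional volume from the example manifest.
	vol := acidv1.AdditionalVolume{
		Name:             "conf",
		MountPath:        "/etc/telegraf",
		SubPath:          "telegraf.conf",
		TargetContainers: []string{"telegraf-sidecar"},
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{Name: "my-config-map"},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}
```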
@@ -500,7 +500,7 @@ func mountShmVolumeNeeded(opConfig config.Config, spec *acidv1.PostgresSpec) *bool {
 	return opConfig.ShmVolume
 }
 
-func generatePodTemplate(
+func (c *Cluster) generatePodTemplate(
 	namespace string,
 	labels labels.Set,
 	annotations map[string]string,
@@ -520,6 +520,7 @@ func generatePodTemplate(
 	additionalSecretMount string,
 	additionalSecretMountPath string,
 	volumes []v1.Volume,
+	additionalVolumes []acidv1.AdditionalVolume,
 ) (*v1.PodTemplateSpec, error) {
 
 	terminateGracePeriodSeconds := terminateGracePeriod
@@ -559,6 +560,10 @@ func generatePodTemplate(
 		addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath)
 	}
 
+	if additionalVolumes != nil {
+		c.addAdditionalVolumes(&podSpec, additionalVolumes)
+	}
+
 	template := v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels: labels,
@@ -1084,7 +1089,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	annotations := c.generatePodAnnotations(spec)
 
 	// generate pod template for the statefulset, based on the spilo container and sidecars
-	podTemplate, err = generatePodTemplate(
+	podTemplate, err = c.generatePodTemplate(
 		c.Namespace,
 		c.labelsSet(true),
 		annotations,
@@ -1104,7 +1109,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 		c.OpConfig.AdditionalSecretMount,
 		c.OpConfig.AdditionalSecretMountPath,
 		volumes,
-	)
+		spec.AdditionalVolumes)
 
 	if err != nil {
 		return nil, fmt.Errorf("could not generate pod template: %v", err)
 	}
@@ -1298,6 +1304,69 @@ func addSecretVolume(podSpec *v1.PodSpec, additionalSecretMount string, additionalSecretMountPath string) {
 	podSpec.Volumes = volumes
 }
 
+func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
+	additionalVolumes []acidv1.AdditionalVolume) {
+
+	volumes := podSpec.Volumes
+	mountPaths := map[string]acidv1.AdditionalVolume{}
+	for i, v := range additionalVolumes {
+		if previousVolume, exist := mountPaths[v.MountPath]; exist {
+			msg := "Volume %+v cannot be mounted to the same path as %+v"
+			c.logger.Warningf(msg, v, previousVolume)
+			continue
+		}
+
+		if v.MountPath == constants.PostgresDataMount {
+			msg := "Cannot mount volume on postgresql data directory, %+v"
+			c.logger.Warningf(msg, v)
+			continue
+		}
+
+		if v.TargetContainers == nil {
+			spiloContainer := podSpec.Containers[0]
+			additionalVolumes[i].TargetContainers = []string{spiloContainer.Name}
+		}
+
+		// "all" must stand alone; a bare `continue` inside the inner loop
+		// would not skip the volume, so record the violation and skip here.
+		mixedTargets := false
+		for _, target := range v.TargetContainers {
+			if target == "all" && len(v.TargetContainers) != 1 {
+				mixedTargets = true
+			}
+		}
+		if mixedTargets {
+			msg := `Target containers could be either "all" or a list of containers, mixing those is not allowed, %+v`
+			c.logger.Warningf(msg, v)
+			continue
+		}
+
+		volumes = append(volumes,
+			v1.Volume{
+				Name:         v.Name,
+				VolumeSource: v.VolumeSource,
+			},
+		)
+
+		mountPaths[v.MountPath] = v
+	}
+
+	c.logger.Infof("Mount additional volumes: %+v", additionalVolumes)
+
+	for i := range podSpec.Containers {
+		mounts := podSpec.Containers[i].VolumeMounts
+		for _, v := range additionalVolumes {
+			for _, target := range v.TargetContainers {
+				if podSpec.Containers[i].Name == target || target == "all" {
+					mounts = append(mounts, v1.VolumeMount{
+						Name:      v.Name,
+						MountPath: v.MountPath,
+						SubPath:   v.SubPath,
+					})
+				}
+			}
+		}
+		podSpec.Containers[i].VolumeMounts = mounts
+	}
+
+	podSpec.Volumes = volumes
+}
+
 func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {
 
 	var storageClassName *string
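To make the two guards at the top of `addAdditionalVolumes` concrete, here is a standalone, hedged sketch of the same filtering. The constant value is an assumption (the operator reads it from `constants.PostgresDataMount`), and printing stands in for the operator's logged warnings.

```go
package main

import "fmt"

// Assumed value of constants.PostgresDataMount in the operator.
const postgresDataMount = "/home/postgres/pgdata"

type additionalVolume struct {
	Name      string
	MountPath string
}

// filterVolumes drops duplicate mount paths and any attempt to mount
// over the PostgreSQL data directory, as addAdditionalVolumes does.
func filterVolumes(vols []additionalVolume) []additionalVolume {
	seen := map[string]bool{}
	kept := []additionalVolume{}
	for _, v := range vols {
		if seen[v.MountPath] {
			fmt.Printf("skipping %q: mount path %s already in use\n", v.Name, v.MountPath)
			continue
		}
		if v.MountPath == postgresDataMount {
			fmt.Printf("skipping %q: cannot mount over the data directory\n", v.Name)
			continue
		}
		seen[v.MountPath] = true
		kept = append(kept, v)
	}
	return kept
}

func main() {
	fmt.Println(filterVolumes([]additionalVolume{
		{Name: "a", MountPath: "/opt/x"},
		{Name: "b", MountPath: "/opt/x"},          // duplicate path, skipped
		{Name: "c", MountPath: postgresDataMount}, // data dir, skipped
	}))
}
```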
@@ -1702,7 +1771,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	annotations := c.generatePodAnnotations(&c.Spec)
 
 	// re-use the method that generates DB pod templates
-	if podTemplate, err = generatePodTemplate(
+	if podTemplate, err = c.generatePodTemplate(
 		c.Namespace,
 		labels,
 		annotations,
@@ -1721,8 +1790,9 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		"",
 		c.OpConfig.AdditionalSecretMount,
 		c.OpConfig.AdditionalSecretMountPath,
-		nil); err != nil {
-		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
+		nil,
+		[]acidv1.AdditionalVolume{}); err != nil {
+		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
 	}
 
 	// overwrite specific params of logical backups pods
@@ -1021,3 +1021,172 @@ func TestTLS(t *testing.T) {
 	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
 	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }
+
+func TestAdditionalVolume(t *testing.T) {
+	testName := "TestAdditionalVolume"
+	tests := []struct {
+		subTest   string
+		podSpec   *v1.PodSpec
+		volumePos int
+	}{
+		{
+			subTest: "empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{},
+				Containers: []v1.Container{
+					{
+						VolumeMounts: []v1.VolumeMount{},
+					},
+				},
+			},
+			volumePos: 0,
+		},
+		{
+			subTest: "non empty PodSpec",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+		{
+			subTest: "non empty PodSpec with sidecar",
+			podSpec: &v1.PodSpec{
+				Volumes: []v1.Volume{{}},
+				Containers: []v1.Container{
+					{
+						Name: "postgres",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+					{
+						Name: "sidecar",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "data",
+								ReadOnly:  false,
+								MountPath: "/data",
+							},
+						},
+					},
+				},
+			},
+			volumePos: 1,
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
+
+	for _, tt := range tests {
+		// Test with additional volume mounted in all containers
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"all"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for i := range tt.podSpec.Containers {
+			volumeMountName := tt.podSpec.Containers[i].VolumeMounts[tt.volumePos].Name
+
+			if volumeMountName != additionalVolumeMount[0].Name {
+				t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+					testName, tt.subTest, additionalVolumeMount, volumeMountName)
+			}
+		}
+
+		numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		if numMountsCheck != numMounts+1 {
+			t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+				numMountsCheck, numMounts+1)
+		}
+	}
+
+	for _, tt := range tests {
+		// Test with additional volume mounted only in the "postgres" container
+		additionalVolumeMount := []acidv1.AdditionalVolume{
+			{
+				Name:             "test",
+				MountPath:        "/test",
+				TargetContainers: []string{"postgres"},
+				VolumeSource: v1.VolumeSource{
+					EmptyDir: &v1.EmptyDirVolumeSource{},
+				},
+			},
+		}
+
+		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+
+		cluster.addAdditionalVolumes(tt.podSpec, additionalVolumeMount)
+		volumeName := tt.podSpec.Volumes[tt.volumePos].Name
+
+		if volumeName != additionalVolumeMount[0].Name {
+			t.Errorf("%s %s: Expected volume %v was not created, have %s instead",
+				testName, tt.subTest, additionalVolumeMount, volumeName)
+		}
+
+		for _, container := range tt.podSpec.Containers {
+			if container.Name == "postgres" {
+				volumeMountName := container.VolumeMounts[tt.volumePos].Name
+
+				if volumeMountName != additionalVolumeMount[0].Name {
+					t.Errorf("%s %s: Expected mount %v was not created, have %s instead",
+						testName, tt.subTest, additionalVolumeMount, volumeMountName)
+				}
+
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck != numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts+1)
+				}
+			} else {
+				numMountsCheck := len(container.VolumeMounts)
+				if numMountsCheck == numMounts+1 {
+					t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
+						numMountsCheck, numMounts)
+				}
+			}
+		}
+	}
+}