make tls pr798 use additionalVolumes capability from pr736 (#920)
* make tls pr798 use additionalVolumes capability from pr736
* move the volume* sections lower
* update helm chart crds and docs
* fix user.md typos
parent ea3eef45d9
commit 7e8f6687eb

@@ -7,6 +7,8 @@
_obj
_test
_manifests
_tmp
github.com

# Architecture specific extensions/prefixes
*.[568vq]

@@ -364,6 +364,21 @@ spec:
            type: string
          teamId:
            type: string
          tls:
            type: object
            required:
              - secretName
            properties:
              secretName:
                type: string
              certificateFile:
                type: string
              privateKeyFile:
                type: string
              caFile:
                type: string
              caSecretName:
                type: string
          tolerations:
            type: array
            items:

@@ -435,5 +435,12 @@ Those parameters are grouped under the `tls` top-level key.
  client connects with `sslmode=verify-ca` or `sslmode=verify-full`.
  Default is empty.

* **caSecretName**
  By setting the `caSecretName` value, the CA certificate file defined by
  `caFile` will be fetched from this secret instead of from `secretName` above.
  This secret has to hold a file with that name in its root.

Optionally one can provide a full path for any of them. By default it is
relative to "/tls/", which is the mount path of the TLS secret.
If `caSecretName` is defined, the ca.crt path is relative to "/tlsca/",
otherwise to the same "/tls/".
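
The path handling described above matches the `ensurePath` helper that the k8sres.go hunks further down call for `tls.crt`, `tls.key` and `ca.crt`. The helper's body is not part of this diff, so the following is only a sketch of the assumed behaviour: absolute paths pass through unchanged, everything else is joined onto the secret's mount directory.

```go
// Sketch of the path resolution described above. Only the call sites of
// ensurePath appear in this diff; the body below is an assumption.
package main

import (
	"fmt"
	"path"
)

func ensurePath(file string, defaultDir string, defaultFile string) string {
	if file == "" {
		file = defaultFile
	}
	if !path.IsAbs(file) {
		return path.Join(defaultDir, file)
	}
	return file
}

func main() {
	fmt.Println(ensurePath("", "/tls", "tls.crt"))          // /tls/tls.crt
	fmt.Println(ensurePath("ca.crt", "/tlsca", ""))         // /tlsca/ca.crt
	fmt.Println(ensurePath("/custom/ca.pem", "/tlsca", "")) // /custom/ca.pem
}
```
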
docs/user.md (31 lines changed)

@@ -585,7 +585,7 @@ OpenShift allocates the users and groups dynamically (based on scc), and their
range is different in every namespace. Due to this dynamic behaviour, it's not
trivial to know at deploy time the uid/gid of the user in the cluster.
Therefore, instead of using a global `spilo_fsgroup` setting, use the `spiloFSGroup` field
per Postgres cluster.```
per Postgres cluster.

Upload the cert as a kubernetes secret:
```sh

@@ -594,7 +594,7 @@ kubectl create secret tls pg-tls \
  --cert pg-tls.crt
```

Or with a CA:
When doing client auth, CA can come optionally from the same secret:
```sh
kubectl create secret generic pg-tls \
  --from-file=tls.crt=server.crt \

@@ -602,9 +602,6 @@ kubectl create secret generic pg-tls \
  --from-file=ca.crt=ca.crt
```

Alternatively it is also possible to use
[cert-manager](https://cert-manager.io/docs/) to generate these secrets.

Then configure the postgres resource with the TLS secret:

```yaml

@@ -619,5 +616,29 @@ spec:
    caFile: "ca.crt" # add this if the secret is configured with a CA
```

Optionally, the CA can be provided by a different secret:
```sh
kubectl create secret generic pg-tls-ca \
  --from-file=ca.crt=ca.crt
```

Then configure the postgres resource with the TLS secret:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql

metadata:
  name: acid-test-cluster
spec:
  tls:
    secretName: "pg-tls"      # this should hold tls.key and tls.crt
    caSecretName: "pg-tls-ca" # this should hold ca.crt
    caFile: "ca.crt"          # add this if the secret is configured with a CA
```

Alternatively, it is also possible to use
[cert-manager](https://cert-manager.io/docs/) to generate these secrets.

Certificate rotation is handled in the spilo image which checks every 5
minutes if the certificates have changed and reloads postgres accordingly.
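
The same configuration can also be expressed against the operator's Go API types that this commit touches (`PostgresSpec`, `TLSDescription`). The snippet below is a sketch only; the import path and the `Postgresql` wrapper type are assumed from the repository layout and are not shown in this diff.

```go
// Sketch: the manifest above, expressed with the operator's Go types.
// Field names follow the TLSDescription/PostgresSpec hunks later in this diff.
package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pg := acidv1.Postgresql{
		ObjectMeta: metav1.ObjectMeta{Name: "acid-test-cluster"},
		Spec: acidv1.PostgresSpec{
			TLS: &acidv1.TLSDescription{
				SecretName:   "pg-tls",    // holds tls.key and tls.crt
				CASecretName: "pg-tls-ca", // holds ca.crt
				CAFile:       "ca.crt",
			},
		},
	}
	fmt.Println(pg.Spec.TLS.SecretName, pg.Spec.TLS.CASecretName)
}
```
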
@@ -9,6 +9,24 @@ metadata:
spec:
  dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  teamId: "acid"
  numberOfInstances: 2
  users:  # Application/Robot users
    zalando:
    - superuser
    - createdb
  enableMasterLoadBalancer: false
  enableReplicaLoadBalancer: false
  # enableConnectionPooler: true # not needed when connectionPooler section is present (see below)
  allowedSourceRanges:  # load balancers' source ranges for both master and replica services
  - 127.0.0.1/32
  databases:
    foo: zalando
  postgresql:
    version: "12"
    parameters: # Expert section
      shared_buffers: "32MB"
      max_connections: "10"
      log_statement: "all"
  volume:
    size: 1Gi
    # storageClass: my-sc

@@ -35,24 +53,6 @@ spec:
        - all
      volumeSource:
        emptyDir: {}
  numberOfInstances: 2
  users:  # Application/Robot users
    zalando:
    - superuser
    - createdb
  enableMasterLoadBalancer: false
  enableReplicaLoadBalancer: false
  # enableConnectionPooler: true # not needed when connectionPooler section is present (see below)
  allowedSourceRanges:  # load balancers' source ranges for both master and replica services
  - 127.0.0.1/32
  databases:
    foo: zalando
  postgresql:
    version: "12"
    parameters: # Expert section
      shared_buffers: "32MB"
      max_connections: "10"
      log_statement: "all"

  enableShmVolume: true
  # spiloFSGroup: 103

@@ -148,8 +148,10 @@ spec:
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
    caFile: "" # optionally configure Postgres with a CA certificate
    caSecretName: "" # optionally the ca.crt can come from this secret instead.
    # file names can be also defined with absolute path, and will no longer be relative
    # to the "/tls/" path where the secret is being mounted by default.
    # to the "/tls/" path where the secret is being mounted by default, and "/tlsca/"
    # where the caSecret is mounted by default.
    # When TLS is enabled, also set spiloFSGroup parameter above to the relevant value.
    # if unknown, set it to 103 which is the usual value in the default spilo images.
    # In Openshift, there is no need to set spiloFSGroup/spilo_fsgroup.

@@ -341,6 +341,8 @@ spec:
                type: string
              caFile:
                type: string
              caSecretName:
                type: string
          tolerations:
            type: array
            items:

@@ -513,6 +513,9 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
					"caFile": {
						Type: "string",
					},
					"caSecretName": {
						Type: "string",
					},
				},
			},
			"tolerations": {

@@ -148,6 +148,7 @@ type TLSDescription struct {
	CertificateFile string `json:"certificateFile,omitempty"`
	PrivateKeyFile  string `json:"privateKeyFile,omitempty"`
	CAFile          string `json:"caFile,omitempty"`
	CASecretName    string `json:"caSecretName,omitempty"`
}

// CloneDescription describes which cluster the new should clone and up to which point in time
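
Because the new field only carries a `json` tag with `omitempty`, a cluster manifest without a client-auth CA serializes exactly as before. A small self-contained sketch (the struct is copied locally rather than imported) illustrating that behaviour:

```go
// Sketch: how the omitempty json tags on TLSDescription behave.
// The struct is a local copy of the type shown above, not an import.
package main

import (
	"encoding/json"
	"fmt"
)

type TLSDescription struct {
	SecretName      string `json:"secretName,omitempty"`
	CertificateFile string `json:"certificateFile,omitempty"`
	PrivateKeyFile  string `json:"privateKeyFile,omitempty"`
	CAFile          string `json:"caFile,omitempty"`
	CASecretName    string `json:"caSecretName,omitempty"`
}

func main() {
	withCA, _ := json.Marshal(TLSDescription{SecretName: "pg-tls", CAFile: "ca.crt", CASecretName: "pg-tls-ca"})
	withoutCA, _ := json.Marshal(TLSDescription{SecretName: "pg-tls"})
	fmt.Println(string(withCA))    // {"secretName":"pg-tls","caFile":"ca.crt","caSecretName":"pg-tls-ca"}
	fmt.Println(string(withoutCA)) // {"secretName":"pg-tls"}
}
```
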
@@ -47,6 +47,28 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) {
	*out = *in
	if in.TargetContainers != nil {
		in, out := &in.TargetContainers, &out.TargetContainers
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.VolumeSource.DeepCopyInto(&out.VolumeSource)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolume.
func (in *AdditionalVolume) DeepCopy() *AdditionalVolume {
	if in == nil {
		return nil
	}
	out := new(AdditionalVolume)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
	*out = *in
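
The generated `DeepCopyInto` above allocates a fresh `TargetContainers` slice instead of copying only the slice header, so mutating a deep copy cannot leak back into the cached cluster spec. A minimal illustration with a stand-in type (not the operator's `AdditionalVolume`):

```go
// Sketch: why DeepCopy clones slices. A shallow struct copy shares the
// backing array with the original; the generated code above allocates a
// fresh slice instead. Stand-in type, not the operator's AdditionalVolume.
package main

import "fmt"

type volume struct {
	TargetContainers []string
}

func (in *volume) DeepCopy() *volume {
	out := &volume{}
	if in.TargetContainers != nil {
		out.TargetContainers = make([]string, len(in.TargetContainers))
		copy(out.TargetContainers, in.TargetContainers)
	}
	return out
}

func main() {
	orig := volume{TargetContainers: []string{"all"}}

	shallow := orig // copies the slice header only
	shallow.TargetContainers[0] = "postgres"
	fmt.Println(orig.TargetContainers[0]) // "postgres" – original mutated

	orig.TargetContainers[0] = "all"
	deep := orig.DeepCopy()
	deep.TargetContainers[0] = "postgres"
	fmt.Println(orig.TargetContainers[0]) // "all" – original untouched
}
```
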
@@ -591,6 +613,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
		*out = new(TLSDescription)
		**out = **in
	}
	if in.AdditionalVolumes != nil {
		in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
		*out = make([]AdditionalVolume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.InitContainersOld != nil {
		in, out := &in.InitContainersOld, &out.InitContainersOld
		*out = make([]corev1.Container, len(*in))

@@ -519,7 +519,6 @@ func (c *Cluster) generatePodTemplate(
	podAntiAffinityTopologyKey string,
	additionalSecretMount string,
	additionalSecretMountPath string,
	volumes []v1.Volume,
	additionalVolumes []acidv1.AdditionalVolume,
) (*v1.PodTemplateSpec, error) {

@@ -539,7 +538,6 @@ func (c *Cluster) generatePodTemplate(
		InitContainers:  initContainers,
		Tolerations:     *tolerationsSpec,
		SecurityContext: &securityContext,
		Volumes:         volumes,
	}

	if shmVolume != nil && *shmVolume {

@@ -854,7 +852,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		sidecarContainers   []v1.Container
		podTemplate         *v1.PodTemplateSpec
		volumeClaimTemplate *v1.PersistentVolumeClaim
		volumes             []v1.Volume
		additionalVolumes   = spec.AdditionalVolumes
	)

	// Improve me. Please.

@@ -1007,8 +1005,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		// this is combined with the FSGroup in the section above
		// to give read access to the postgres user
		defaultMode := int32(0640)
		volumes = append(volumes, v1.Volume{
			Name: "tls-secret",
		mountPath := "/tls"
		additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
			Name:      spec.TLS.SecretName,
			MountPath: mountPath,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: spec.TLS.SecretName,

@@ -1017,13 +1017,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
			},
		})

		mountPath := "/tls"
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			MountPath: mountPath,
			Name:      "tls-secret",
			ReadOnly:  true,
		})

		// use the same filenames as Secret resources by default
		certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
		privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")

@@ -1034,11 +1027,31 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		)

		if spec.TLS.CAFile != "" {
			caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
			// support scenario when the ca.crt resides in a different secret, diff path
			mountPathCA := mountPath
			if spec.TLS.CASecretName != "" {
				mountPathCA = mountPath + "ca"
			}

			caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "")
			spiloEnvVars = append(
				spiloEnvVars,
				v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
			)

			// the ca file from CASecretName secret takes priority
			if spec.TLS.CASecretName != "" {
				additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
					Name:      spec.TLS.CASecretName,
					MountPath: mountPathCA,
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{
							SecretName:  spec.TLS.CASecretName,
							DefaultMode: &defaultMode,
						},
					},
				})
			}
		}
	}
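
With both the TLS secret and the optional CA secret expressed as `AdditionalVolume` entries, the actual pod wiring is left to the additionalVolumes machinery introduced in PR 736. That helper is not part of this diff; the sketch below only illustrates what the expansion amounts to, namely one pod volume per entry plus a mount in the targeted containers.

```go
// Simplified sketch of the additionalVolumes expansion this commit relies on.
// This is an illustration, not the operator's real helper or types.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

type additionalVolume struct {
	Name             string
	MountPath        string
	TargetContainers []string
	VolumeSource     v1.VolumeSource
}

func addAdditionalVolumes(podSpec *v1.PodSpec, volumes []additionalVolume) {
	for _, av := range volumes {
		podSpec.Volumes = append(podSpec.Volumes, v1.Volume{
			Name:         av.Name,
			VolumeSource: av.VolumeSource,
		})
		for i := range podSpec.Containers {
			c := &podSpec.Containers[i]
			// an empty target list (or "all") is treated as "every container" here
			if len(av.TargetContainers) == 0 || av.TargetContainers[0] == "all" || contains(av.TargetContainers, c.Name) {
				c.VolumeMounts = append(c.VolumeMounts, v1.VolumeMount{
					Name:      av.Name,
					MountPath: av.MountPath,
				})
			}
		}
	}
}

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	pod := v1.PodSpec{Containers: []v1.Container{{Name: "postgres"}}}
	addAdditionalVolumes(&pod, []additionalVolume{{
		Name:      "pg-tls",
		MountPath: "/tls",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{SecretName: "pg-tls"},
		},
	}})
	fmt.Printf("%d volume(s), %d mount(s)\n", len(pod.Volumes), len(pod.Containers[0].VolumeMounts))
}
```
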
@@ -1108,8 +1121,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		c.OpConfig.PodAntiAffinityTopologyKey,
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath,
		volumes,
		spec.AdditionalVolumes)
		additionalVolumes)

	if err != nil {
		return nil, fmt.Errorf("could not generate pod template: %v", err)

@@ -1614,11 +1626,11 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
	c.logger.Info(msg, description.S3WalPath)

	envs := []v1.EnvVar{
		v1.EnvVar{
		{
			Name:  "CLONE_WAL_S3_BUCKET",
			Value: c.OpConfig.WALES3Bucket,
		},
		v1.EnvVar{
		{
			Name:  "CLONE_WAL_BUCKET_SCOPE_SUFFIX",
			Value: getBucketScopeSuffix(description.UID),
		},
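
The `generateCloneEnvironment` hunk is purely cosmetic: inside a `[]v1.EnvVar{...}` literal the element type is redundant and can be dropped (the simplification `gofmt -s` suggests). Both forms below produce identical values; the bucket name is a made-up placeholder.

```go
// Both literals are equivalent; the second omits the redundant element
// type inside the slice literal, as in the hunk above.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	verbose := []v1.EnvVar{
		v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: "my-bucket"},
	}
	compact := []v1.EnvVar{
		{Name: "CLONE_WAL_S3_BUCKET", Value: "my-bucket"},
	}
	fmt.Println(verbose[0] == compact[0]) // true
}
```
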
@@ -1790,7 +1802,6 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
		"",
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath,
		nil,
		[]acidv1.AdditionalVolume{}); err != nil {
		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
	}

@@ -961,6 +961,7 @@ func TestTLS(t *testing.T) {
	var spec acidv1.PostgresSpec
	var cluster *Cluster
	var spiloFSGroup = int64(103)
	var additionalVolumes = spec.AdditionalVolumes

	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
		return acidv1.PostgresSpec{

@@ -1000,8 +1001,20 @@ func TestTLS(t *testing.T) {
	assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")

	defaultMode := int32(0640)
	mountPath := "/tls"
	additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
		Name:      spec.TLS.SecretName,
		MountPath: mountPath,
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName:  spec.TLS.SecretName,
				DefaultMode: &defaultMode,
			},
		},
	})

	volume := v1.Volume{
		Name: "tls-secret",
		Name: "my-secret",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: "my-secret",

@@ -1013,8 +1026,7 @@ func TestTLS(t *testing.T) {

	assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
		MountPath: "/tls",
		Name:      "tls-secret",
		ReadOnly:  true,
		Name:      "my-secret",
	}, "the volume gets mounted in /tls")

	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})