add support for custom TLS certificates (#798)

* add support for custom TLS certificates
zimbatm 2020-03-13 11:44:38 +01:00 committed by GitHub
parent 650b8daf77
commit 65fb2ce1a6
11 changed files with 285 additions and 19 deletions


@@ -359,3 +359,24 @@ CPU and memory limits for the sidecar container.
* **memory**
  memory limits for the sidecar container. Optional, overrides the
  `default_memory_limits` operator configuration parameter.
## Custom TLS certificates
These parameters are grouped under the `tls` top-level key; see the example
after this list.
* **secretName**
  When the `secretName` value is set, the cluster loads the given Kubernetes
  Secret into the container as a volume and uses it as the TLS certificate
  instead of the auto-generated one. It is up to the user to create and manage
  the Kubernetes Secret, either by hand or with a tool like the cert-manager
  operator.

* **certificateFile**
  Filename of the certificate. Defaults to "tls.crt".

* **privateKeyFile**
  Filename of the private key. Defaults to "tls.key".

* **caFile**
  Optional filename of the CA certificate. Useful when clients connect with
  `sslmode=verify-ca` or `sslmode=verify-full`.
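For orientation, a minimal sketch of how these parameters fit together in a
cluster manifest; the Secret name `pg-tls` is illustrative and mirrors the
example in the user documentation:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster
spec:
  tls:
    secretName: "pg-tls"        # Kubernetes Secret holding the certificate
    certificateFile: "tls.crt"  # default filename, shown for clarity
    privateKeyFile: "tls.key"   # default filename, shown for clarity
    caFile: "ca.crt"            # only needed for verify-ca/verify-full clients
```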


@@ -511,3 +511,50 @@ monitoring is outside the scope of operator responsibilities. See
[configuration reference](reference/cluster_manifest.md) and
[administrator documentation](administrator.md) for details on how backups are
executed.
## Custom TLS certificates
By default, the spilo image generates its own TLS certificate during startup.
This certificate is not secure since it cannot be verified and thus doesn't
protect against active MITM attacks. This section shows how to load a custom
TLS certificate from a Kubernetes Secret resource.
Before applying these changes, the operator must also be configured with
`spilo_fsgroup` set to the GID of the postgres user group. If no value is
provided, the cluster defaults to `103`, which is the GID used in the default
spilo image.
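As a sketch, with the ConfigMap-based operator configuration this could look
as follows; the ConfigMap name is an assumption that depends on how the
operator is deployed:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator  # name depends on your deployment
data:
  spilo_fsgroup: "103"  # GID of the postgres user in the default spilo image
```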
Upload the certificate as a Kubernetes Secret:
```sh
kubectl create secret tls pg-tls \
  --key pg-tls.key \
  --cert pg-tls.crt
```
Or with a CA:
```sh
kubectl create secret generic pg-tls \
  --from-file=tls.crt=server.crt \
  --from-file=tls.key=server.key \
  --from-file=ca.crt=ca.crt
```
Alternatively, it is also possible to use
[cert-manager](https://cert-manager.io/docs/) to generate these secrets; a
sketch of such a resource follows.
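A sketch of a cert-manager `Certificate` that produces a compatible Secret;
the issuer name, DNS names, and API version are assumptions that depend on the
installed cert-manager release and your environment:

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: pg-tls
spec:
  secretName: pg-tls  # the Secret referenced from the cluster manifest
  issuerRef:
    name: my-issuer   # assumes an existing Issuer/ClusterIssuer
    kind: ClusterIssuer
  dnsNames:
    - acid-test-cluster.default.svc.cluster.local
```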
Then configure the postgres resource with the TLS secret:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster
spec:
  tls:
    secretName: "pg-tls"
    caFile: "ca.crt" # add this if the secret is configured with a CA
```
Certificate rotation is handled by the spilo image, which checks every 5
minutes whether the certificates have changed and reloads Postgres accordingly.
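To check which certificate a running pod actually serves, one option is to
inspect the mounted file; this sketch assumes the default `/tls` mount path, a
pod named `acid-test-cluster-0`, and the `openssl` CLI being available in the
image:

```sh
kubectl exec acid-test-cluster-0 -- \
  openssl x509 -in /tls/tls.crt -noout -subject -enddate
```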

go.mod

@@ -11,6 +11,7 @@ require (
	github.com/lib/pq v1.2.0
	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
	github.com/sirupsen/logrus v1.4.2
	github.com/stretchr/testify v1.4.0
	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect

go.sum

@@ -275,6 +275,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=


@@ -100,3 +100,10 @@ spec:
# env:
# - name: "USEFUL_VAR"
#   value: "perhaps-true"
  # Custom TLS certificate. Disabled unless tls.secretName has a value.
  tls:
    secretName: ""  # should correspond to a Kubernetes Secret resource to load
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
    caFile: ""  # optionally configure Postgres with a CA certificate


@@ -251,6 +251,19 @@ spec:
            type: string
          teamId:
            type: string
          tls:
            type: object
            required:
              - secretName
            properties:
              secretName:
                type: string
              certificateFile:
                type: string
              privateKeyFile:
                type: string
              caFile:
                type: string
          tolerations:
            type: array
            items:


@@ -417,6 +417,24 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
			"teamId": {
				Type: "string",
			},
			"tls": {
				Type:     "object",
				Required: []string{"secretName"},
				Properties: map[string]apiextv1beta1.JSONSchemaProps{
					"secretName": {
						Type: "string",
					},
					"certificateFile": {
						Type: "string",
					},
					"privateKeyFile": {
						Type: "string",
					},
					"caFile": {
						Type: "string",
					},
				},
			},
			"tolerations": {
				Type: "array",
				Items: &apiextv1beta1.JSONSchemaPropsOrArray{


@@ -61,6 +61,7 @@ type PostgresSpec struct {
	StandbyCluster     *StandbyDescription `json:"standby"`
	PodAnnotations     map[string]string   `json:"podAnnotations"`
	ServiceAnnotations map[string]string   `json:"serviceAnnotations"`
	TLS                *TLSDescription     `json:"tls"`

	// deprecated json tags
	InitContainersOld []v1.Container `json:"init_containers,omitempty"`
@@ -126,6 +127,13 @@ type StandbyDescription struct {
	S3WalPath string `json:"s3_wal_path,omitempty"`
}

type TLSDescription struct {
	SecretName      string `json:"secretName,omitempty"`
	CertificateFile string `json:"certificateFile,omitempty"`
	PrivateKeyFile  string `json:"privateKeyFile,omitempty"`
	CAFile          string `json:"caFile,omitempty"`
}

// CloneDescription describes which cluster the new cluster should clone and up to which point in time
type CloneDescription struct {
	ClusterName string `json:"cluster,omitempty"`


@@ -521,6 +521,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
			(*out)[key] = val
		}
	}
	if in.TLS != nil {
		in, out := &in.TLS, &out.TLS
		*out = new(TLSDescription)
		**out = **in
	}
	if in.InitContainersOld != nil {
		in, out := &in.InitContainersOld, &out.InitContainersOld
		*out = make([]corev1.Container, len(*in))
@@ -752,6 +757,22 @@ func (in *StandbyDescription) DeepCopy() *StandbyDescription {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSDescription) DeepCopyInto(out *TLSDescription) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSDescription.
func (in *TLSDescription) DeepCopy() *TLSDescription {
	if in == nil {
		return nil
	}
	out := new(TLSDescription)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
	*out = *in


@@ -3,6 +3,7 @@ package cluster
import (
	"encoding/json"
	"fmt"
	"path"
	"sort"

	"github.com/sirupsen/logrus"
@@ -30,7 +31,10 @@ const (
	patroniPGBinariesParameterName = "bin_dir"
	patroniPGParametersParameterName = "parameters"
	patroniPGHBAConfParameterName = "pg_hba"
	localHost = "127.0.0.1/32"

	// the gid of the postgres user in the default spilo image
	spiloPostgresGID = 103

	localHost = "127.0.0.1/32"
)
type pgUser struct {
@@ -446,6 +450,7 @@ func generatePodTemplate(
	podAntiAffinityTopologyKey string,
	additionalSecretMount string,
	additionalSecretMountPath string,
	volumes []v1.Volume,
) (*v1.PodTemplateSpec, error) {
	terminateGracePeriodSeconds := terminateGracePeriod
@@ -464,6 +469,7 @@
		InitContainers:  initContainers,
		Tolerations:     *tolerationsSpec,
		SecurityContext: &securityContext,
		Volumes:         volumes,
	}

	if shmVolume != nil && *shmVolume {
if shmVolume != nil && *shmVolume {
@@ -724,6 +730,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		sidecarContainers   []v1.Container
		podTemplate         *v1.PodTemplateSpec
		volumeClaimTemplate *v1.PersistentVolumeClaim
		volumes             []v1.Volume
	)

	// Improve me. Please.
@@ -840,21 +847,76 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	}

	// generate environment variables for the spilo container
	spiloEnvVars := deduplicateEnvVars(
		c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone,
			spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger)
	spiloEnvVars := c.generateSpiloPodEnvVars(
		c.Postgresql.GetUID(),
		spiloConfiguration,
		&spec.Clone,
		spec.StandbyCluster,
		customPodEnvVarsList,
	)

	// pickup the docker image for the spilo container
	effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)

	// determine the FSGroup for the spilo pod
	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
	if spec.SpiloFSGroup != nil {
		effectiveFSGroup = spec.SpiloFSGroup
	}

	volumeMounts := generateVolumeMounts(spec.Volume)

	// configure TLS with a custom secret volume
	if spec.TLS != nil && spec.TLS.SecretName != "" {
		if effectiveFSGroup == nil {
			c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
			fsGroup := int64(spiloPostgresGID)
			effectiveFSGroup = &fsGroup
		}
		// this is combined with the FSGroup above to give read access to the
		// postgres user
		defaultMode := int32(0640)
		volumes = append(volumes, v1.Volume{
			Name: "tls-secret",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName:  spec.TLS.SecretName,
					DefaultMode: &defaultMode,
				},
			},
		})

		mountPath := "/tls"
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			MountPath: mountPath,
			Name:      "tls-secret",
			ReadOnly:  true,
		})

		// use the same filenames as Secret resources by default
		certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
		privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
		spiloEnvVars = append(
			spiloEnvVars,
			v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
			v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
		)

		if spec.TLS.CAFile != "" {
			caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
			spiloEnvVars = append(
				spiloEnvVars,
				v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
			)
		}
	}

	// generate the spilo container
	c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
	spiloContainer := generateContainer(c.containerName(),
		&effectiveDockerImage,
		resourceRequirements,
		spiloEnvVars,
		deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
		volumeMounts,
		c.OpConfig.Resources.SpiloPrivileged,
	)
@@ -893,16 +955,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
	effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)

	// determine the FSGroup for the spilo pod
	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
	if spec.SpiloFSGroup != nil {
		effectiveFSGroup = spec.SpiloFSGroup
	}

	annotations := c.generatePodAnnotations(spec)

	// generate pod template for the statefulset, based on the spilo container and sidecars
	if podTemplate, err = generatePodTemplate(
	podTemplate, err = generatePodTemplate(
		c.Namespace,
		c.labelsSet(true),
		annotations,
@@ -920,10 +976,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		c.OpConfig.EnablePodAntiAffinity,
		c.OpConfig.PodAntiAffinityTopologyKey,
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath); err != nil {
		return nil, fmt.Errorf("could not generate pod template: %v", err)
	}
		c.OpConfig.AdditionalSecretMountPath,
		volumes,
	)
	if err != nil {
		return nil, fmt.Errorf("could not generate pod template: %v", err)
	}
@@ -1539,7 +1594,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
		false,
		"",
		c.OpConfig.AdditionalSecretMount,
		c.OpConfig.AdditionalSecretMountPath); err != nil {
		c.OpConfig.AdditionalSecretMountPath,
		nil); err != nil {
		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
	}
@@ -1671,3 +1727,13 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
func (c *Cluster) getLogicalBackupJobName() (jobName string) {
	return "logical-backup-" + c.clusterName().Name
}
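
// ensurePath returns file unchanged when it is an absolute path; otherwise it
// joins file (or defaultFile when file is empty) onto defaultDir.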
func ensurePath(file string, defaultDir string, defaultFile string) string {
	if file == "" {
		return path.Join(defaultDir, defaultFile)
	}
	if !path.IsAbs(file) {
		return path.Join(defaultDir, file)
	}
	return file
}


@@ -3,16 +3,17 @@ package cluster
import (
	"reflect"
	v1 "k8s.io/api/core/v1"
	"testing"

	"github.com/stretchr/testify/assert"
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	"github.com/zalando/postgres-operator/pkg/util"
	"github.com/zalando/postgres-operator/pkg/util/config"
	"github.com/zalando/postgres-operator/pkg/util/constants"
	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
	v1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
@@ -451,3 +452,65 @@ func TestSecretVolume(t *testing.T) {
		}
	}
}

func TestTLS(t *testing.T) {
	var err error
	var spec acidv1.PostgresSpec
	var cluster *Cluster

	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
		return acidv1.PostgresSpec{
			TeamID: "myapp", NumberOfInstances: 1,
			Resources: acidv1.Resources{
				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
				ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
			},
			Volume: acidv1.Volume{
				Size: "1G",
			},
			TLS: &tls,
		}
	}

	cluster = New(
		Config{
			OpConfig: config.Config{
				PodManagementPolicy: "ordered_ready",
				ProtectedRoles:      []string{"admin"},
				Auth: config.Auth{
					SuperUsername:       superUserName,
					ReplicationUsername: replicationUserName,
				},
			},
		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

	spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
	s, err := cluster.generateStatefulSet(&spec)
	if err != nil {
		assert.NoError(t, err)
	}

	fsGroup := int64(103)
	assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")

	defaultMode := int32(0640)
	volume := v1.Volume{
		Name: "tls-secret",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName:  "my-secret",
				DefaultMode: &defaultMode,
			},
		},
	}
	assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")

	assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
		MountPath: "/tls",
		Name:      "tls-secret",
		ReadOnly:  true,
	}, "the volume gets mounted in /tls")

	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
}