Merge branch 'master' into feature/connection-pooler

Dmitrii Dolgov 2020-03-16 12:07:56 +01:00
commit 4d61adf6b7
20 changed files with 501 additions and 38 deletions


@@ -218,7 +218,7 @@ configLogicalBackup:
logical_backup_s3_endpoint: ""
# S3 Secret Access Key
logical_backup_s3_secret_access_key: ""
- # S3 server side encription
+ # S3 server side encryption
logical_backup_s3_sse: "AES256"
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"


@@ -209,7 +209,7 @@ configLogicalBackup:
logical_backup_s3_endpoint: ""
# S3 Secret Access Key
logical_backup_s3_secret_access_key: ""
- # S3 server side encription
+ # S3 server side encryption
logical_backup_s3_sse: "AES256"
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"


@@ -392,3 +392,24 @@ present.
* **resources**
Resource configuration for connection pool deployment.
## Custom TLS certificates
These parameters are grouped under the `tls` top-level key; see the example sketch after this list.
* **secretName**
By setting the `secretName` value, the cluster switches to loading the given
Kubernetes Secret into the container as a volume and uses that certificate
instead. It is up to the user to create and manage the Kubernetes Secret,
either by hand or with a tool like the cert-manager
operator.
* **certificateFile**
Filename of the certificate. Defaults to "tls.crt".
* **privateKeyFile**
Filename of the private key. Defaults to "tls.key".
* **caFile**
Optional filename of the CA certificate. Useful when the client connects
with `sslmode=verify-ca` or `sslmode=verify-full`.
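
For illustration, a minimal `tls` section in a cluster manifest could look like the sketch below (the Secret name `pg-tls` is a placeholder; `certificateFile` and `privateKeyFile` fall back to the defaults above when omitted):

```yaml
spec:
  tls:
    secretName: "pg-tls"   # Kubernetes Secret holding tls.crt and tls.key
    caFile: "ca.crt"       # optional, for clients using sslmode=verify-ca or verify-full
```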


@@ -284,7 +284,7 @@ configuration they are grouped under the `kubernetes` key.
used for AWS volume resizing and not required if you don't need that
capability. The default is `false`.
* **master_pod_move_timeout**
The period of time to wait for the success of migration of master pods from
an unschedulable node. The migration includes Patroni switchovers to
respective replicas on healthy nodes. The situation where master pods still
@@ -472,7 +472,7 @@ grouped under the `logical_backup` key.
When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty.
* **logical_backup_s3_sse**
- Specify server side encription that S3 storage is using. If empty string
+ Specify server side encryption that S3 storage is using. If empty string
is specified, no argument will be passed to `aws s3` command. Default: "AES256".
* **logical_backup_s3_access_key_id**


@@ -564,3 +564,50 @@ should be general approach between different implementation).
Note, that using `pgbouncer` means meaningful resource CPU limit should be less
than 1 core (there is a way to utilize more than one, but in K8S it's easier
just to spin up more instances).
## Custom TLS certificates
By default, the spilo image generates its own TLS certificate during startup.
This certificate is not secure since it cannot be verified and thus doesn't
protect against active MITM attacks. This section shows how a Kubernetes
Secret resource can be loaded with a custom TLS certificate.
Before applying these changes, the operator must also be configured with the
`spilo_fsgroup` set to the GID matching the postgres user group. If the value
is not provided, the cluster will default to `103`, which is the GID of the
postgres user in the default spilo image.
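
As a rough sketch, with the ConfigMap-based operator configuration this amounts to setting the flat `spilo_fsgroup` key (with the CRD-based configuration the equivalent field sits under the `kubernetes` section); the ConfigMap name below is a placeholder:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator   # placeholder; use the operator's actual ConfigMap
data:
  # GID of the postgres user inside the default spilo image
  spilo_fsgroup: "103"
```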
Upload the certificate as a Kubernetes Secret:
```sh
kubectl create secret tls pg-tls \
--key pg-tls.key \
--cert pg-tls.crt
```
Or with a CA:
```sh
kubectl create secret generic pg-tls \
--from-file=tls.crt=server.crt \
--from-file=tls.key=server.key \
--from-file=ca.crt=ca.crt
```
Alternatively, [cert-manager](https://cert-manager.io/docs/) can be used to
generate these secrets.
Then configure the postgres resource with the TLS secret:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-test-cluster
spec:
tls:
secretName: "pg-tls"
caFile: "ca.crt" # add this if the secret is configured with a CA
```
Certificate rotation is handled in the spilo image, which checks every 5
minutes whether the certificates have changed and reloads postgres accordingly.


@@ -537,13 +537,15 @@ class EndToEndTestCase(unittest.TestCase):
self.assert_failover(
master_node, len(replica_nodes), failover_targets, cluster_label)
- # disable pod anti affintiy again
+ # now disable pod anti-affinity again which will cause yet another failover
patch_disable_antiaffinity = {
"data": {
"enable_pod_antiaffinity": "false"
}
}
k8s.update_config(patch_disable_antiaffinity)
+ k8s.wait_for_pod_start('spilo-role=master')
+ k8s.wait_for_pod_start('spilo-role=replica')
class K8sApi:

go.mod

@@ -11,6 +11,7 @@ require (
github.com/lib/pq v1.2.0
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
github.com/sirupsen/logrus v1.4.2
+ github.com/stretchr/testify v1.4.0
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect

go.sum

@@ -275,6 +275,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=


@@ -24,13 +24,14 @@ package cmd
import (
"fmt"
- "github.com/spf13/cobra"
- "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
- PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"log"
"strconv"
"time"
+ "github.com/spf13/cobra"
+ v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+ PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
@@ -95,8 +96,12 @@ func listAll(listPostgres *v1.PostgresqlList) {
template := "%-32s%-16s%-12s%-12s%-12s%-12s%-12s\n"
fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME", "NAMESPACE")
for _, pgObjs := range listPostgres.Items {
- fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
- pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size, pgObjs.Namespace)
+ fmt.Printf(template, pgObjs.Name,
+ pgObjs.Status.PostgresClusterStatus,
+ strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
+ pgObjs.Spec.PostgresqlParam.PgVersion,
+ time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp),
+ pgObjs.Spec.Size, pgObjs.Namespace)
}
}
@@ -104,8 +109,12 @@ func listWithNamespace(listPostgres *v1.PostgresqlList) {
template := "%-32s%-16s%-12s%-12s%-12s%-12s\n"
fmt.Printf(template, "NAME", "STATUS", "INSTANCES", "VERSION", "AGE", "VOLUME")
for _, pgObjs := range listPostgres.Items {
- fmt.Printf(template, pgObjs.Name, pgObjs.Status.PostgresClusterStatus, strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
- pgObjs.Spec.PgVersion, time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp), pgObjs.Spec.Size)
+ fmt.Printf(template, pgObjs.Name,
+ pgObjs.Status.PostgresClusterStatus,
+ strconv.Itoa(int(pgObjs.Spec.NumberOfInstances)),
+ pgObjs.Spec.PostgresqlParam.PgVersion,
+ time.Since(pgObjs.CreationTimestamp.Time).Truncate(TrimCreateTimestamp),
+ pgObjs.Spec.Size)
}
}


@@ -100,3 +100,10 @@ spec:
# env:
# - name: "USEFUL_VAR"
# value: "perhaps-true"
# Custom TLS certificate. Disabled unless tls.secretName has a value.
tls:
secretName: "" # should correspond to a Kubernetes Secret resource to load
certificateFile: "tls.crt"
privateKeyFile: "tls.key"
caFile: "" # optionally configure Postgres with a CA certificate


@@ -302,6 +302,19 @@ spec:
type: string
teamId:
type: string
tls:
type: object
required:
- secretName
properties:
secretName:
type: string
certificateFile:
type: string
privateKeyFile:
type: string
caFile:
type: string
tolerations:
type: array
items:


@@ -490,6 +490,24 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
"teamId": {
Type: "string",
},
"tls": {
Type: "object",
Required: []string{"secretName"},
Properties: map[string]apiextv1beta1.JSONSchemaProps{
"secretName": {
Type: "string",
},
"certificateFile": {
Type: "string",
},
"privateKeyFile": {
Type: "string",
},
"caFile": {
Type: "string",
},
},
},
"tolerations": {
Type: "array",
Items: &apiextv1beta1.JSONSchemaPropsOrArray{


@@ -55,7 +55,7 @@ type KubernetesMetaConfiguration struct {
EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
EnableSidecars *bool `json:"enable_sidecars,omitempty"`
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
- ClusterDomain string `json:"cluster_domain"`
+ ClusterDomain string `json:"cluster_domain,omitempty"`
OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
PodRoleLabel string `json:"pod_role_label,omitempty"`


@@ -66,6 +66,7 @@ type PostgresSpec struct {
StandbyCluster *StandbyDescription `json:"standby"`
PodAnnotations map[string]string `json:"podAnnotations"`
ServiceAnnotations map[string]string `json:"serviceAnnotations"`
+ TLS *TLSDescription `json:"tls"`
// deprecated json tags
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
@@ -131,6 +132,13 @@ type StandbyDescription struct {
S3WalPath string `json:"s3_wal_path,omitempty"`
}
type TLSDescription struct {
SecretName string `json:"secretName,omitempty"`
CertificateFile string `json:"certificateFile,omitempty"`
PrivateKeyFile string `json:"privateKeyFile,omitempty"`
CAFile string `json:"caFile,omitempty"`
}
// CloneDescription describes which cluster the new should clone and up to which point in time
type CloneDescription struct {
ClusterName string `json:"cluster,omitempty"`


@@ -585,6 +585,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = val
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSDescription)
**out = **in
}
if in.InitContainersOld != nil {
in, out := &in.InitContainersOld, &out.InitContainersOld
*out = make([]corev1.Container, len(*in))
@@ -816,6 +821,22 @@ func (in *StandbyDescription) DeepCopy() *StandbyDescription {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSDescription) DeepCopyInto(out *TLSDescription) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSDescription.
func (in *TLSDescription) DeepCopy() *TLSDescription {
if in == nil {
return nil
}
out := new(TLSDescription)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
*out = *in


@@ -593,10 +593,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}()
- if oldSpec.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
- c.logger.Warningf("postgresql version change(%q -> %q) has no effect", oldSpec.Spec.PgVersion, newSpec.Spec.PgVersion)
+ if oldSpec.Spec.PostgresqlParam.PgVersion != newSpec.Spec.PostgresqlParam.PgVersion { // PG versions comparison
+ c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
+ oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
//we need that hack to generate statefulset with the old version
- newSpec.Spec.PgVersion = oldSpec.Spec.PgVersion
+ newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}
// Service


@@ -3,6 +3,7 @@ package cluster
import (
"encoding/json"
"fmt"
+ "path"
"sort"
"github.com/sirupsen/logrus"
@@ -27,13 +28,16 @@ import (
)
const (
- pgBinariesLocationTemplate = "/usr/lib/postgresql/%s/bin"
+ pgBinariesLocationTemplate = "/usr/lib/postgresql/%v/bin"
patroniPGBinariesParameterName = "bin_dir"
patroniPGParametersParameterName = "parameters"
patroniPGHBAConfParameterName = "pg_hba"
localHost = "127.0.0.1/32"
connectionPoolContainer = "connection-pool"
pgPort = 5432
+ // the gid of the postgres user in the default spilo image
+ spiloPostgresGID = 103
)
type pgUser struct {
@@ -508,6 +512,7 @@ func generatePodTemplate(
podAntiAffinityTopologyKey string,
additionalSecretMount string,
additionalSecretMountPath string,
+ volumes []v1.Volume,
) (*v1.PodTemplateSpec, error) {
terminateGracePeriodSeconds := terminateGracePeriod
@@ -526,6 +531,7 @@
InitContainers: initContainers,
Tolerations: *tolerationsSpec,
SecurityContext: &securityContext,
+ Volumes: volumes,
}
if shmVolume != nil && *shmVolume {
@@ -778,6 +784,50 @@ func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acid
}
}
func extractPgVersionFromBinPath(binPath string, template string) (string, error) {
var pgVersion float32
_, err := fmt.Sscanf(binPath, template, &pgVersion)
if err != nil {
return "", err
}
return fmt.Sprintf("%v", pgVersion), nil
}
func (c *Cluster) getNewPgVersion(container v1.Container, newPgVersion string) (string, error) {
var (
spiloConfiguration spiloConfiguration
runningPgVersion string
err error
)
for _, env := range container.Env {
if env.Name != "SPILO_CONFIGURATION" {
continue
}
err = json.Unmarshal([]byte(env.Value), &spiloConfiguration)
if err != nil {
return newPgVersion, err
}
}
if len(spiloConfiguration.PgLocalConfiguration) > 0 {
currentBinPath := fmt.Sprintf("%v", spiloConfiguration.PgLocalConfiguration[patroniPGBinariesParameterName])
runningPgVersion, err = extractPgVersionFromBinPath(currentBinPath, pgBinariesLocationTemplate)
if err != nil {
return "", fmt.Errorf("could not extract Postgres version from %v in SPILO_CONFIGURATION", currentBinPath)
}
} else {
return "", fmt.Errorf("could not find %q setting in SPILO_CONFIGURATION", patroniPGBinariesParameterName)
}
if runningPgVersion != newPgVersion {
c.logger.Warningf("postgresql version change(%q -> %q) has no effect", runningPgVersion, newPgVersion)
newPgVersion = runningPgVersion
}
return newPgVersion, nil
}
func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
var (
@@ -786,6 +836,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
sidecarContainers []v1.Container
podTemplate *v1.PodTemplateSpec
volumeClaimTemplate *v1.PersistentVolumeClaim
+ volumes []v1.Volume
)
// Improve me. Please.
@@ -902,21 +953,76 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}
// generate environment variables for the spilo container
- spiloEnvVars := deduplicateEnvVars(
- c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone,
- spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger)
+ spiloEnvVars := c.generateSpiloPodEnvVars(
+ c.Postgresql.GetUID(),
+ spiloConfiguration,
+ &spec.Clone,
+ spec.StandbyCluster,
+ customPodEnvVarsList,
+ )
// pickup the docker image for the spilo container
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
+ // determine the FSGroup for the spilo pod
+ effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
+ if spec.SpiloFSGroup != nil {
+ effectiveFSGroup = spec.SpiloFSGroup
+ }
volumeMounts := generateVolumeMounts(spec.Volume)
// configure TLS with a custom secret volume
if spec.TLS != nil && spec.TLS.SecretName != "" {
if effectiveFSGroup == nil {
c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
fsGroup := int64(spiloPostgresGID)
effectiveFSGroup = &fsGroup
}
// this is combined with the FSGroup above to give read access to the
// postgres user
defaultMode := int32(0640)
volumes = append(volumes, v1.Volume{
Name: "tls-secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: spec.TLS.SecretName,
DefaultMode: &defaultMode,
},
},
})
mountPath := "/tls"
volumeMounts = append(volumeMounts, v1.VolumeMount{
MountPath: mountPath,
Name: "tls-secret",
ReadOnly: true,
})
// use the same filenames as Secret resources by default
certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
spiloEnvVars = append(
spiloEnvVars,
v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
)
if spec.TLS.CAFile != "" {
caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
spiloEnvVars = append(
spiloEnvVars,
v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
)
}
}
// generate the spilo container
c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
spiloContainer := generateContainer(c.containerName(),
&effectiveDockerImage,
resourceRequirements,
- spiloEnvVars,
+ deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
volumeMounts,
c.OpConfig.Resources.SpiloPrivileged,
)
@@ -955,16 +1061,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
- // determine the FSGroup for the spilo pod
- effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
- if spec.SpiloFSGroup != nil {
- effectiveFSGroup = spec.SpiloFSGroup
- }
annotations := c.generatePodAnnotations(spec)
// generate pod template for the statefulset, based on the spilo container and sidecars
- if podTemplate, err = generatePodTemplate(
+ podTemplate, err = generatePodTemplate(
c.Namespace,
c.labelsSet(true),
annotations,
@@ -982,10 +1082,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
c.OpConfig.EnablePodAntiAffinity,
c.OpConfig.PodAntiAffinityTopologyKey,
c.OpConfig.AdditionalSecretMount,
- c.OpConfig.AdditionalSecretMountPath); err != nil {
- return nil, fmt.Errorf("could not generate pod template: %v", err)
- }
+ c.OpConfig.AdditionalSecretMountPath,
+ volumes,
+ )
if err != nil {
return nil, fmt.Errorf("could not generate pod template: %v", err)
}
@@ -1601,7 +1700,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
false,
"",
c.OpConfig.AdditionalSecretMount,
- c.OpConfig.AdditionalSecretMountPath); err != nil {
+ c.OpConfig.AdditionalSecretMountPath,
+ nil); err != nil {
return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
}
@@ -1686,7 +1786,7 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
// Postgres env vars
{
Name: "PG_VERSION",
- Value: c.Spec.PgVersion,
+ Value: c.Spec.PostgresqlParam.PgVersion,
},
{
Name: "PGPORT",
@@ -2026,3 +2126,13 @@ func (c *Cluster) generateConnPoolService(spec *acidv1.PostgresSpec) *v1.Service
return service
}
func ensurePath(file string, defaultDir string, defaultFile string) string {
if file == "" {
return path.Join(defaultDir, defaultFile)
}
if !path.IsAbs(file) {
return path.Join(defaultDir, file)
}
return file
}


@@ -5,10 +5,10 @@ import (
"fmt"
"reflect"
- v1 "k8s.io/api/core/v1"
"testing"
+ "github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
@@ -16,6 +16,7 @@ import (
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -384,6 +385,135 @@ func TestCloneEnv(t *testing.T) {
}
}
func TestExtractPgVersionFromBinPath(t *testing.T) {
testName := "TestExtractPgVersionFromBinPath"
tests := []struct {
subTest string
binPath string
template string
expected string
}{
{
subTest: "test current bin path with decimal against hard coded template",
binPath: "/usr/lib/postgresql/9.6/bin",
template: pgBinariesLocationTemplate,
expected: "9.6",
},
{
subTest: "test current bin path against hard coded template",
binPath: "/usr/lib/postgresql/12/bin",
template: pgBinariesLocationTemplate,
expected: "12",
},
{
subTest: "test alternative bin path against a matching template",
binPath: "/usr/pgsql-12/bin",
template: "/usr/pgsql-%v/bin",
expected: "12",
},
}
for _, tt := range tests {
pgVersion, err := extractPgVersionFromBinPath(tt.binPath, tt.template)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if pgVersion != tt.expected {
t.Errorf("%s %s: Expected version %s, have %s instead",
testName, tt.subTest, tt.expected, pgVersion)
}
}
}
func TestGetPgVersion(t *testing.T) {
testName := "TestGetPgVersion"
tests := []struct {
subTest string
pgContainer v1.Container
currentPgVersion string
newPgVersion string
}{
{
subTest: "new version with decimal point differs from current SPILO_CONFIGURATION",
pgContainer: v1.Container{
Name: "postgres",
Env: []v1.EnvVar{
{
Name: "SPILO_CONFIGURATION",
Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/9.6/bin\"}}",
},
},
},
currentPgVersion: "9.6",
newPgVersion: "12",
},
{
subTest: "new version differs from current SPILO_CONFIGURATION",
pgContainer: v1.Container{
Name: "postgres",
Env: []v1.EnvVar{
{
Name: "SPILO_CONFIGURATION",
Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/11/bin\"}}",
},
},
},
currentPgVersion: "11",
newPgVersion: "12",
},
{
subTest: "new version is lower than the one found in current SPILO_CONFIGURATION",
pgContainer: v1.Container{
Name: "postgres",
Env: []v1.EnvVar{
{
Name: "SPILO_CONFIGURATION",
Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}",
},
},
},
currentPgVersion: "12",
newPgVersion: "11",
},
{
subTest: "new version is the same like in the current SPILO_CONFIGURATION",
pgContainer: v1.Container{
Name: "postgres",
Env: []v1.EnvVar{
{
Name: "SPILO_CONFIGURATION",
Value: "{\"postgresql\": {\"bin_dir\": \"/usr/lib/postgresql/12/bin\"}}",
},
},
},
currentPgVersion: "12",
newPgVersion: "12",
},
}
var cluster = New(
Config{
OpConfig: config.Config{
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
for _, tt := range tests {
pgVersion, err := cluster.getNewPgVersion(tt.pgContainer, tt.newPgVersion)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if pgVersion != tt.currentPgVersion {
t.Errorf("%s %s: Expected version %s, have %s instead",
testName, tt.subTest, tt.currentPgVersion, pgVersion)
}
}
}
func TestSecretVolume(t *testing.T) {
testName := "TestSecretVolume"
tests := []struct {
@@ -823,3 +953,65 @@ func TestConnPoolServiceSpec(t *testing.T) {
}
}
}
func TestTLS(t *testing.T) {
var err error
var spec acidv1.PostgresSpec
var cluster *Cluster
makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
return acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: acidv1.Volume{
Size: "1G",
},
TLS: &tls,
}
}
cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
s, err := cluster.generateStatefulSet(&spec)
if err != nil {
assert.NoError(t, err)
}
fsGroup := int64(103)
assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
defaultMode := int32(0640)
volume := v1.Volume{
Name: "tls-secret",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: "my-secret",
DefaultMode: &defaultMode,
},
},
}
assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
MountPath: "/tls",
Name: "tls-secret",
ReadOnly: true,
}, "the volume gets mounted in /tls")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
}


@@ -291,6 +291,18 @@ func (c *Cluster) syncStatefulSet() error {
// statefulset is already there, make sure we use its definition in order to compare with the spec.
c.Statefulset = sset
// check if there is no Postgres version mismatch
for _, container := range c.Statefulset.Spec.Template.Spec.Containers {
if container.Name != "postgres" {
continue
}
pgVersion, err := c.getNewPgVersion(container, c.Spec.PostgresqlParam.PgVersion)
if err != nil {
return fmt.Errorf("could not parse current Postgres version: %v", err)
}
c.Spec.PostgresqlParam.PgVersion = pgVersion
}
desiredSS, err := c.generateStatefulSet(&c.Spec)
if err != nil {
return fmt.Errorf("could not generate statefulset: %v", err)


@@ -56,7 +56,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod)
result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
- result.ClusterDomain = fromCRD.Kubernetes.ClusterDomain
+ result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
result.EnablePodDisruptionBudget = fromCRD.Kubernetes.EnablePodDisruptionBudget