refactor pooler tls support and set pooler pod security context (#2255)
* bump pooler image
* set pooler pod security context
* use hard coded RunAsUser 100 and RunAsGroup 101 for pooler pod
* unify generation of TLS secret mounts
* extend documentation on tls support
* add unit test for testing TLS support for pooler
* add e2e test for tls support
Parent: 87b7ac0806
Commit: 0e7beb5fe5
@@ -95,6 +95,7 @@ coverage.xml
 # e2e tests
 e2e/manifests
+e2e/tls
 
 # Translations
 *.mo
@@ -637,7 +637,7 @@ spec:
         default: "pooler"
       connection_pooler_image:
         type: string
-        default: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
+        default: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
       connection_pooler_max_db_connections:
         type: integer
         default: 60
@@ -416,7 +416,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode
@@ -543,7 +543,9 @@ for both master and replica pooler services (if `enableReplicaConnectionPooler`
 
 ## Custom TLS certificates
 
-Those parameters are grouped under the `tls` top-level key.
+Those parameters are grouped under the `tls` top-level key. Note, you have to
+define `spiloFSGroup` in the Postgres cluster manifest or `spilo_fsgroup` in
+the global configuration before adding the `tls` section.
 
 * **secretName**
   By setting the `secretName` value, the cluster will switch to load the given
docs/user.md (29 changed lines)
@@ -1197,14 +1197,19 @@ don't know the value, use `103` which is the GID from the default Spilo image
 OpenShift allocates the users and groups dynamically (based on scc), and their
 range is different in every namespace. Due to this dynamic behaviour, it's not
 trivial to know at deploy time the uid/gid of the user in the cluster.
-Therefore, instead of using a global `spilo_fsgroup` setting, use the
-`spiloFSGroup` field per Postgres cluster.
+Therefore, instead of using a global `spilo_fsgroup` setting in the operator
+configuration, use the `spiloFSGroup` field per Postgres cluster manifest.
+
+For testing purposes, you can generate a self-signed certificate with openssl:
+```sh
+openssl req -x509 -nodes -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=acid.zalan.do"
+```
 
 Upload the cert as a kubernetes secret:
 ```sh
 kubectl create secret tls pg-tls \
-  --key pg-tls.key \
-  --cert pg-tls.crt
+  --key tls.key \
+  --cert tls.crt
 ```
 
 When doing client auth, CA can come optionally from the same secret:
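If you prefer doing this from Go instead of kubectl, a minimal client-go sketch creates the same kind of secret. This is not part of the commit; the `pg-tls` name and the file names follow the example above, and the kubeconfig loading and `default` namespace are assumptions:

```go
package main

import (
	"context"
	"os"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load kubeconfig the same way kubectl does (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	crt, err := os.ReadFile("tls.crt")
	if err != nil {
		panic(err)
	}
	key, err := os.ReadFile("tls.key")
	if err != nil {
		panic(err)
	}

	// Equivalent of: kubectl create secret tls pg-tls --key tls.key --cert tls.crt
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "pg-tls", Namespace: "default"},
		Type:       v1.SecretTypeTLS,
		Data: map[string][]byte{
			v1.TLSCertKey:       crt, // key "tls.crt"
			v1.TLSPrivateKeyKey: key, // key "tls.key"
		},
	}
	if _, err := clientset.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```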
@@ -1231,8 +1236,7 @@ spec:
 
 Optionally, the CA can be provided by a different secret:
 ```sh
-kubectl create secret generic pg-tls-ca \
-  --from-file=ca.crt=ca.crt
+kubectl create secret generic pg-tls-ca --from-file=ca.crt=ca.crt
 ```
 
 Then configure the postgres resource with the TLS secret:
@@ -1255,3 +1259,16 @@ Alternatively, it is also possible to use
 
 Certificate rotation is handled in the Spilo image which checks every 5
 minutes if the certificates have changed and reloads postgres accordingly.
+
+### TLS certificates for connection pooler
+
+By default, the pgBouncer image generates its own TLS certificate like Spilo.
+When the `tls` section is specified in the manifest, it is used for the
+connection pooler pod(s) as well. The security context options are hard-coded
+to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same
+as for Spilo.
+
+As of now, the operator does not sync the pooler deployment automatically,
+which means that changes in the pod template are not caught. You need to
+toggle `enableConnectionPooler` to set the environment variables, volumes,
+secret mounts and securityContext required for TLS support in the pooler pod.
@@ -29,10 +29,12 @@ default: tools
 
 clean:
 	rm -rf manifests
+	rm -rf tls
 
 copy: clean
 	mkdir manifests
 	cp -r ../manifests .
+	mkdir tls
 
 docker: scm-source.json
 	docker build -t "$(IMAGE):$(TAG)" .
@@ -55,6 +55,10 @@ function set_kind_api_server_ip(){
   sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
 }
 
+function generate_certificate(){
+  openssl req -x509 -nodes -newkey rsa:2048 -keyout tls/tls.key -out tls/tls.crt -subj "/CN=acid.zalan.do"
+}
+
 function run_tests(){
   echo "Running tests... image: ${e2e_test_runner_image}"
   # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile
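The same throwaway certificate could be produced without openssl. A hedged Go sketch using only the standard library; the output paths and the CN mirror the script above, while the validity period and serial number are illustrative:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"os"
	"time"
)

func main() {
	// 2048-bit RSA key, like `-newkey rsa:2048` in the script.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Self-signed certificate with CN=acid.zalan.do (template == parent).
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "acid.zalan.do"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}

	crt, err := os.Create("tls/tls.crt")
	if err != nil {
		panic(err)
	}
	defer crt.Close()
	pem.Encode(crt, &pem.Block{Type: "CERTIFICATE", Bytes: der})

	keyOut, err := os.Create("tls/tls.key")
	if err != nil {
		panic(err)
	}
	defer keyOut.Close()
	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
}
```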
@@ -62,6 +66,7 @@ function run_tests(){
   docker run --rm --network=host -e "TERM=xterm-256color" \
     --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
     --mount type=bind,source="$(readlink -f manifests)",target=/manifests \
+    --mount type=bind,source="$(readlink -f tls)",target=/tls \
     --mount type=bind,source="$(readlink -f tests)",target=/tests \
     --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
     --mount type=bind,source="$(readlink -f scripts)",target=/scripts \
@@ -82,6 +87,7 @@ function main(){
   [[ ! -f ${kubeconfig_path} ]] && start_kind
   load_operator_image
   set_kind_api_server_ip
+  generate_certificate
 
   shift
   run_tests $@
@@ -156,6 +156,26 @@ class K8s:
         while not get_services():
             time.sleep(self.RETRY_TIMEOUT_SEC)
 
+    def count_pods_with_volume_mount(self, mount_name, labels, namespace='default'):
+        pod_count = 0
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        for pod in pods:
+            for mount in pod.spec.containers[0].volume_mounts:
+                if mount.name == mount_name:
+                    pod_count += 1
+
+        return pod_count
+
+    def count_pods_with_env_variable(self, env_variable_key, labels, namespace='default'):
+        pod_count = 0
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        for pod in pods:
+            for env in pod.spec.containers[0].env:
+                if env.name == env_variable_key:
+                    pod_count += 1
+
+        return pod_count
+
     def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
@@ -241,6 +261,18 @@ class K8s:
     def patch_pod(self, data, pod_name, namespace="default"):
         self.api.core_v1.patch_namespaced_pod(pod_name, namespace, data)
 
+    def create_tls_secret_with_kubectl(self, secret_name):
+        return subprocess.run(
+            ["kubectl", "create", "secret", "tls", secret_name, "--key=tls/tls.key", "--cert=tls/tls.crt"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+
+    def create_tls_ca_secret_with_kubectl(self, secret_name):
+        return subprocess.run(
+            ["kubectl", "create", "secret", "generic", secret_name, "--from-file=ca.crt=tls/ca.crt"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+
     def create_with_kubectl(self, path):
         return subprocess.run(
             ["kubectl", "apply", "-f", path],
@@ -622,6 +622,49 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
                              1, "Secret not created for user in namespace")
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_custom_ssl_certificate(self):
+        '''
+        Test if spilo uses a custom SSL certificate
+        '''
+
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+        tls_secret = "pg-tls"
+
+        # get nodes of master and replica(s) (expected target of new master)
+        _, replica_nodes = k8s.get_pg_nodes(cluster_label)
+        self.assertNotEqual(replica_nodes, [])
+
+        try:
+            # create secret containing ssl certificate
+            result = self.k8s.create_tls_secret_with_kubectl(tls_secret)
+            print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
+
+            # enable TLS for the cluster
+            pg_patch_tls = {
+                "spec": {
+                    "spiloFSGroup": 103,
+                    "tls": {
+                        "secretName": tls_secret
+                    }
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_tls)
+
+            # wait for switched over
+            k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+            self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods")
+            self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_PRIVATE_KEY_FILE", cluster_label), 2, "TLS env variable SSL_PRIVATE_KEY_FILE missing in Spilo pods")
+            self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount(tls_secret, cluster_label), 2, "TLS volume mount missing in Spilo pods")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_enable_disable_connection_pooler(self):
         '''
@@ -653,6 +696,11 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label), 2, "No pooler service found")
         self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 1, "Pooler secret not created")
 
+        # TLS still enabled so check existing env variables and volume mounts
+        self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_CRT", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_CRT missing in pooler pods")
+        self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_KEY", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_KEY missing in pooler pods")
+        self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount("pg-tls", pooler_label), 4, "TLS volume mount missing in pooler pods")
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
@@ -17,7 +17,7 @@ data:
   # connection_pooler_default_cpu_request: "500m"
   # connection_pooler_default_memory_limit: 100Mi
   # connection_pooler_default_memory_request: 100Mi
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
   # connection_pooler_max_db_connections: 60
   # connection_pooler_mode: "transaction"
   # connection_pooler_number_of_instances: 2
@@ -23,7 +23,7 @@ spec:
       serviceAccountName: postgres-operator
       containers:
       - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/pgbouncer:master-26
+        image: registry.opensource.zalan.do/acid/pgbouncer:master-27
        imagePullPolicy: IfNotPresent
        resources:
          requests:
@@ -635,7 +635,7 @@ spec:
         default: "pooler"
       connection_pooler_image:
         type: string
-        default: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
+        default: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
       connection_pooler_max_db_connections:
         type: integer
         default: 60
@@ -203,7 +203,7 @@ configuration:
     connection_pooler_default_cpu_request: "500m"
     connection_pooler_default_memory_limit: 100Mi
     connection_pooler_default_memory_request: 100Mi
-    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
+    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27"
     # connection_pooler_max_db_connections: 60
     connection_pooler_mode: "transaction"
     connection_pooler_number_of_instances: 2
@@ -3,7 +3,6 @@ package cluster
 import (
 	"context"
 	"fmt"
-	"path/filepath"
 	"strings"
 	"time"
 
@@ -25,6 +24,9 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util/retryutil"
 )
 
+var poolerRunAsUser = int64(100)
+var poolerRunAsGroup = int64(101)
+
 // ConnectionPoolerObjects K8s objects that are belong to connection pooler
 type ConnectionPoolerObjects struct {
 	Deployment *appsv1.Deployment
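The uid/gid are declared as addressable package-level variables because the Kubernetes `PodSecurityContext` fields take `*int64` pointers, not literals. A minimal standalone illustration of that constraint (a sketch, not operator code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

var poolerRunAsUser = int64(100)
var poolerRunAsGroup = int64(101)

func main() {
	// RunAsUser/RunAsGroup are *int64, so the values above must be
	// addressable; &int64(100) would not compile in Go.
	sc := v1.PodSecurityContext{
		RunAsUser:  &poolerRunAsUser,
		RunAsGroup: &poolerRunAsGroup,
	}
	fmt.Println(*sc.RunAsUser, *sc.RunAsGroup)
}
```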
@@ -261,6 +263,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 		makeDefaultConnectionPoolerResources(&c.OpConfig),
 		connectionPoolerContainer)
 
+	if err != nil {
+		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
+	}
+
 	effectiveDockerImage := util.Coalesce(
 		connectionPoolerSpec.DockerImage,
 		c.OpConfig.ConnectionPooler.Image)
@@ -269,10 +275,6 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 		connectionPoolerSpec.Schema,
 		c.OpConfig.ConnectionPooler.Schema)
 
-	if err != nil {
-		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
-	}
-
 	secretSelector := func(key string) *v1.SecretKeySelector {
 		effectiveUser := util.Coalesce(
 			connectionPoolerSpec.User,
@@ -344,62 +346,53 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 	// 2. Reference the secret in a volume
 	// 3. Mount the volume to the container at /tls
 	var poolerVolumes []v1.Volume
+	var volumeMounts []v1.VolumeMount
 	if spec.TLS != nil && spec.TLS.SecretName != "" {
-		// Env vars
-		crtFile := spec.TLS.CertificateFile
-		keyFile := spec.TLS.PrivateKeyFile
-		caFile := spec.TLS.CAFile
-		mountPath := "/tls"
-		mountPathCA := mountPath
-
-		if crtFile == "" {
-			crtFile = "tls.crt"
-		}
-		if keyFile == "" {
-			keyFile = "tls.key"
-		}
-		if caFile == "" {
-			caFile = "ca.crt"
-		}
-		if spec.TLS.CASecretName != "" {
-			mountPathCA = mountPath + "ca"
-		}
-
-		envVars = append(
-			envVars,
-			v1.EnvVar{
-				Name: "CONNECTION_POOLER_CLIENT_TLS_CRT", Value: filepath.Join(mountPath, crtFile),
-			},
-			v1.EnvVar{
-				Name: "CONNECTION_POOLER_CLIENT_TLS_KEY", Value: filepath.Join(mountPath, keyFile),
-			},
-			v1.EnvVar{
-				Name: "CONNECTION_POOLER_CLIENT_CA_FILE", Value: filepath.Join(mountPathCA, caFile),
-			},
-		)
-
-		// Volume
-		mode := int32(0640)
-		volume := v1.Volume{
-			Name: "tls",
-			VolumeSource: v1.VolumeSource{
-				Secret: &v1.SecretVolumeSource{
-					SecretName:  spec.TLS.SecretName,
-					DefaultMode: &mode,
-				},
-			},
-		}
-		poolerVolumes = append(poolerVolumes, volume)
-
-		// Mount
-		poolerContainer.VolumeMounts = []v1.VolumeMount{{
-			Name:      "tls",
-			MountPath: "/tls",
-		}}
+		getPoolerTLSEnv := func(k string) string {
+			keyName := ""
+			switch k {
+			case "tls.crt":
+				keyName = "CONNECTION_POOLER_CLIENT_TLS_CRT"
+			case "tls.key":
+				keyName = "CONNECTION_POOLER_CLIENT_TLS_KEY"
+			case "tls.ca":
+				keyName = "CONNECTION_POOLER_CLIENT_CA_FILE"
+			default:
+				panic(fmt.Sprintf("TLS env key for pooler unknown %s", k))
+			}
+
+			return keyName
+		}
+		tlsEnv, tlsVolumes := generateTlsMounts(spec, getPoolerTLSEnv)
+		envVars = append(envVars, tlsEnv...)
+		for _, vol := range tlsVolumes {
+			poolerVolumes = append(poolerVolumes, v1.Volume{
+				Name:         vol.Name,
+				VolumeSource: vol.VolumeSource,
+			})
+			volumeMounts = append(volumeMounts, v1.VolumeMount{
+				Name:      vol.Name,
+				MountPath: vol.MountPath,
+			})
+		}
 	}
 
 	poolerContainer.Env = envVars
+	poolerContainer.VolumeMounts = volumeMounts
 	tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
+	securityContext := v1.PodSecurityContext{}
+
+	// determine the User, Group and FSGroup for the pooler pod
+	securityContext.RunAsUser = &poolerRunAsUser
+	securityContext.RunAsGroup = &poolerRunAsGroup
+
+	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
+	if spec.SpiloFSGroup != nil {
+		effectiveFSGroup = spec.SpiloFSGroup
+	}
+	if effectiveFSGroup != nil {
+		securityContext.FSGroup = effectiveFSGroup
+	}
 
 	podTemplate := &v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
@@ -412,15 +405,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 			Containers:      []v1.Container{poolerContainer},
 			Tolerations:     tolerationsSpec,
 			Volumes:         poolerVolumes,
+			SecurityContext: &securityContext,
 		},
 	}
 
-	if spec.TLS != nil && spec.TLS.SecretName != "" && spec.SpiloFSGroup != nil {
-		podTemplate.Spec.SecurityContext = &v1.PodSecurityContext{
-			FSGroup: spec.SpiloFSGroup,
-		}
-	}
-
 	nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
 	if c.OpConfig.EnablePodAntiAffinity {
 		labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
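The unification works by routing both Spilo and pooler TLS wiring through one generator, with a per-consumer closure translating secret keys into env var names. A minimal, self-contained sketch of that pattern (names and the simplified return type are illustrative, not the operator's API):

```go
package main

import "fmt"

// envFor maps a secret key ("tls.crt", "tls.key", "tls.ca") to the
// consumer-specific environment variable name.
type envFor func(key string) string

// generateTLSEnv builds the env-var assignments shared by all consumers;
// only the variable names differ, so they are injected via the mapper.
func generateTLSEnv(mountPath string, tlsEnv envFor) map[string]string {
	return map[string]string{
		tlsEnv("tls.crt"): mountPath + "/tls.crt",
		tlsEnv("tls.key"): mountPath + "/tls.key",
	}
}

func main() {
	spilo := func(k string) string {
		return map[string]string{"tls.crt": "SSL_CERTIFICATE_FILE", "tls.key": "SSL_PRIVATE_KEY_FILE"}[k]
	}
	pooler := func(k string) string {
		return map[string]string{"tls.crt": "CONNECTION_POOLER_CLIENT_TLS_CRT", "tls.key": "CONNECTION_POOLER_CLIENT_TLS_KEY"}[k]
	}
	fmt.Println(generateTLSEnv("/tls", spilo))
	fmt.Println(generateTLSEnv("/tls", pooler))
}
```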
@@ -1,6 +1,7 @@
 package cluster
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"
@@ -11,6 +12,7 @@ import (
 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -19,6 +21,19 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 )
 
+func newFakeK8sPoolerTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+	clientSet := fake.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PodsGetter:         clientSet.CoreV1(),
+		PostgresqlsGetter:  acidClientSet.AcidV1(),
+		StatefulSetsGetter: clientSet.AppsV1(),
+		DeploymentsGetter:  clientSet.AppsV1(),
+		ServicesGetter:     clientSet.CoreV1(),
+	}, clientSet
+}
+
 func mockInstallLookupFunction(schema string, user string) error {
 	return nil
 }
@@ -919,6 +934,122 @@ func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRol
 	return nil
 }
 
+func TestPoolerTLS(t *testing.T) {
+	client, _ := newFakeK8sPoolerTestClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	tlsSecretName := "my-secret"
+	spiloFSGroup := int64(103)
+	defaultMode := int32(0640)
+	mountPath := "/tls"
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			TeamID: "myapp", NumberOfInstances: 1,
+			EnableConnectionPooler: util.True(),
+			Resources: &acidv1.Resources{
+				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+				ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			},
+			Volume: acidv1.Volume{
+				Size: "1G",
+			},
+			TLS: &acidv1.TLSDescription{
+				SecretName: tlsSecretName, CAFile: "ca.crt"},
+			AdditionalVolumes: []acidv1.AdditionalVolume{
+				acidv1.AdditionalVolume{
+					Name:      tlsSecretName,
+					MountPath: mountPath,
+					VolumeSource: v1.VolumeSource{
+						Secret: &v1.SecretVolumeSource{
+							SecretName:  tlsSecretName,
+							DefaultMode: &defaultMode,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				ProtectedRoles:      []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				Resources: config.Resources{
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+					PodRoleLabel:         "spilo-role",
+					SpiloFSGroup:         &spiloFSGroup,
+				},
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	// create a statefulset
+	_, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+
+	// create pooler resources
+	cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
+	cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{
+		Deployment:     nil,
+		Service:        nil,
+		Name:           cluster.connectionPoolerName(Master),
+		ClusterName:    clusterName,
+		Namespace:      namespace,
+		LookupFunction: false,
+		Role:           Master,
+	}
+
+	_, err = cluster.syncConnectionPoolerWorker(nil, &pg, Master)
+	assert.NoError(t, err)
+
+	deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(Master), metav1.GetOptions{})
+	assert.NoError(t, err)
+
+	fsGroup := int64(103)
+	assert.Equal(t, &fsGroup, deploy.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
+
+	volume := v1.Volume{
+		Name: "my-secret",
+		VolumeSource: v1.VolumeSource{
+			Secret: &v1.SecretVolumeSource{
+				SecretName:  "my-secret",
+				DefaultMode: &defaultMode,
+			},
+		},
+	}
+	assert.Contains(t, deploy.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
+
+	poolerContainer := deploy.Spec.Template.Spec.Containers[constants.ConnectionPoolerContainer]
+	assert.Contains(t, poolerContainer.VolumeMounts, v1.VolumeMount{
+		MountPath: "/tls",
+		Name:      "my-secret",
+	}, "the volume gets mounted in /tls")
+
+	assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_TLS_CRT", Value: "/tls/tls.crt"})
+	assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_TLS_KEY", Value: "/tls/tls.key"})
+	assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_CA_FILE", Value: "/tls/ca.crt"})
+}
+
 func TestConnectionPoolerServiceSpec(t *testing.T) {
 	testName := "Test connection pooler service spec generation"
 	var cluster = New(
@@ -1288,57 +1288,26 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 
 	// configure TLS with a custom secret volume
 	if spec.TLS != nil && spec.TLS.SecretName != "" {
-		// this is combined with the FSGroup in the section above
-		// to give read access to the postgres user
-		defaultMode := int32(0640)
-		mountPath := "/tls"
-		additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
-			Name:      spec.TLS.SecretName,
-			MountPath: mountPath,
-			VolumeSource: v1.VolumeSource{
-				Secret: &v1.SecretVolumeSource{
-					SecretName:  spec.TLS.SecretName,
-					DefaultMode: &defaultMode,
-				},
-			},
-		})
-
-		// use the same filenames as Secret resources by default
-		certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
-		privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
-		spiloEnvVars = appendEnvVars(
-			spiloEnvVars,
-			v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
-			v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
-		)
-
-		if spec.TLS.CAFile != "" {
-			// support scenario when the ca.crt resides in a different secret, diff path
-			mountPathCA := mountPath
-			if spec.TLS.CASecretName != "" {
-				mountPathCA = mountPath + "ca"
-			}
-
-			caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "")
-			spiloEnvVars = appendEnvVars(
-				spiloEnvVars,
-				v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
-			)
-
-			// the ca file from CASecretName secret takes priority
-			if spec.TLS.CASecretName != "" {
-				additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
-					Name:      spec.TLS.CASecretName,
-					MountPath: mountPathCA,
-					VolumeSource: v1.VolumeSource{
-						Secret: &v1.SecretVolumeSource{
-							SecretName:  spec.TLS.CASecretName,
-							DefaultMode: &defaultMode,
-						},
-					},
-				})
-			}
-		}
+		getSpiloTLSEnv := func(k string) string {
+			keyName := ""
+			switch k {
+			case "tls.crt":
+				keyName = "SSL_CERTIFICATE_FILE"
+			case "tls.key":
+				keyName = "SSL_PRIVATE_KEY_FILE"
+			case "tls.ca":
+				keyName = "SSL_CA_FILE"
+			default:
+				panic(fmt.Sprintf("TLS env key unknown %s", k))
+			}
+
+			return keyName
+		}
+		tlsEnv, tlsVolumes := generateTlsMounts(spec, getSpiloTLSEnv)
+		for _, env := range tlsEnv {
+			spiloEnvVars = appendEnvVars(spiloEnvVars, env)
+		}
+		additionalVolumes = append(additionalVolumes, tlsVolumes...)
 	}
 
 	// generate the spilo container
@@ -1492,6 +1461,59 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	return statefulSet, nil
 }
 
+func generateTlsMounts(spec *acidv1.PostgresSpec, tlsEnv func(key string) string) ([]v1.EnvVar, []acidv1.AdditionalVolume) {
+	// this is combined with the FSGroup in the section above
+	// to give read access to the postgres user
+	defaultMode := int32(0640)
+	mountPath := "/tls"
+	env := make([]v1.EnvVar, 0)
+	volumes := make([]acidv1.AdditionalVolume, 0)
+
+	volumes = append(volumes, acidv1.AdditionalVolume{
+		Name:      spec.TLS.SecretName,
+		MountPath: mountPath,
+		VolumeSource: v1.VolumeSource{
+			Secret: &v1.SecretVolumeSource{
+				SecretName:  spec.TLS.SecretName,
+				DefaultMode: &defaultMode,
+			},
+		},
+	})
+
+	// use the same filenames as Secret resources by default
+	certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
+	privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
+	env = append(env, v1.EnvVar{Name: tlsEnv("tls.crt"), Value: certFile})
+	env = append(env, v1.EnvVar{Name: tlsEnv("tls.key"), Value: privateKeyFile})
+
+	if spec.TLS.CAFile != "" {
+		// support scenario when the ca.crt resides in a different secret, diff path
+		mountPathCA := mountPath
+		if spec.TLS.CASecretName != "" {
+			mountPathCA = mountPath + "ca"
+		}
+
+		caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "")
+		env = append(env, v1.EnvVar{Name: tlsEnv("tls.ca"), Value: caFile})
+
+		// the ca file from CASecretName secret takes priority
+		if spec.TLS.CASecretName != "" {
+			volumes = append(volumes, acidv1.AdditionalVolume{
+				Name:      spec.TLS.CASecretName,
+				MountPath: mountPathCA,
+				VolumeSource: v1.VolumeSource{
+					Secret: &v1.SecretVolumeSource{
+						SecretName:  spec.TLS.CASecretName,
+						DefaultMode: &defaultMode,
+					},
+				},
+			})
+		}
+	}
+
+	return env, volumes
+}
+
 func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string {
 	annotations := make(map[string]string)
 	for k, v := range c.OpConfig.CustomPodAnnotations {
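`generateTlsMounts` relies on an `ensurePath` helper that is not shown in this diff. A plausible sketch of its contract, inferred from the call sites above (an empty file name falls back to the given default, relative names are anchored under the mount directory, absolute paths pass through); this is an assumption, not the repository's actual implementation:

```go
package main

import (
	"fmt"
	"path"
)

// ensurePath is a sketch of the helper assumed by generateTlsMounts:
// fall back to defaultFile when file is empty, keep absolute paths,
// and join relative names onto the mount directory.
func ensurePath(file string, defaultDir string, defaultFile string) string {
	if file == "" {
		file = defaultFile
	}
	if !path.IsAbs(file) {
		return path.Join(defaultDir, file)
	}
	return file
}

func main() {
	fmt.Println(ensurePath("", "/tls", "tls.crt"))         // /tls/tls.crt
	fmt.Println(ensurePath("server.crt", "/tls", ""))      // /tls/server.crt
	fmt.Println(ensurePath("/etc/ssl/ca.pem", "/tls", "")) // /etc/ssl/ca.pem
}
```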