configurable container capabilities (#1336)

* configurable container capabilities

* revert change on TestTLS

* fix e2e test

* minor fix
This commit is contained in:
Felix Kunde 2021-01-29 14:54:48 +01:00 committed by GitHub
parent d488ae10a0
commit 12ad8c91fa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 155 additions and 32 deletions

View File

@ -130,6 +130,10 @@ spec:
kubernetes:
type: object
properties:
additional_pod_capabilities:
type: array
items:
type: string
cluster_domain:
type: string
default: "cluster.local"

View File

@ -59,6 +59,10 @@ configUsers:
super_username: postgres
configKubernetes:
# list of additional capabilities for postgres container
# additional_pod_capabilities:
# - "SYS_NICE"
# default DNS domain of K8s cluster where operator is running
cluster_domain: cluster.local
# additional labels assigned to the cluster objects

View File

@ -61,6 +61,9 @@ configUsers:
super_username: postgres
configKubernetes:
# list of additional capabilities for postgres container
# additional_pod_capabilities: "SYS_NICE"
# default DNS domain of K8s cluster where operator is running
cluster_domain: cluster.local
# additional labels assigned to the cluster objects

View File

@ -351,6 +351,12 @@ configuration they are grouped under the `kubernetes` key.
used for AWS volume resizing and not required if you don't need that
capability. The default is `false`.
* **additional_pod_capabilities**
list of additional capabilities to be added to the postgres container's
SecurityContext (e.g. SYS_NICE etc.). Please make sure first that the
PodSecurityPolicy allows the capabilities listed here. Otherwise, the
container will not start. The default is empty.
* **master_pod_move_timeout**
The period of time to wait for the success of migration of master pods from
an unschedulable node. The migration includes Patroni switchovers to

View File

@ -182,6 +182,10 @@ class K8s:
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
    '''
    Count pods (matching the label selector) whose first container's
    SecurityContext lists exactly the given capabilities.
    Pods without a SecurityContext or without capabilities (e.g. pods that
    have not been rotated since the config change) count as non-matching
    instead of raising AttributeError.
    '''
    pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items

    def matches(pod):
        sc = pod.spec.containers[0].security_context
        if sc is None or sc.capabilities is None:
            return False
        # order-sensitive list comparison, mirroring what the operator sets
        return sc.capabilities.add == capabilities

    return len([pod for pod in pods if matches(pod)])
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''
@ -433,6 +437,10 @@ class K8sBase:
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
    '''
    Count pods (matching the label selector) whose first container's
    SecurityContext lists exactly the given capabilities.
    Pods without a SecurityContext or without capabilities (e.g. pods that
    have not been rotated since the config change) count as non-matching
    instead of raising AttributeError.
    '''
    pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items

    def matches(pod):
        sc = pod.spec.containers[0].security_context
        if sc is None or sc.capabilities is None:
            return False
        # order-sensitive list comparison, mirroring what the operator sets
        return sc.capabilities.add == capabilities

    return len([pod for pod in pods if matches(pod)])
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
new_pod_node = ''

View File

@ -155,6 +155,25 @@ class EndToEndTestCase(unittest.TestCase):
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_additional_pod_capabilities(self):
    '''
    Extend postgres container capabilities
    '''
    pod_selector = 'application=spilo,cluster-name=acid-minimal-cluster'
    extra_caps = ["SYS_NICE","CHOWN"]

    # the operator ConfigMap takes the capability list as a comma-separated string
    config_patch = {"data": {"additional_pod_capabilities": ','.join(extra_caps)}}
    self.k8s.update_config(config_patch)

    # wait until the operator has processed the config change ...
    self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
                         "Operator does not get in sync")
    # ... and verify both cluster pods carry the new capabilities
    self.eventuallyEqual(
        lambda: self.k8s.count_pods_with_container_capabilities(extra_caps, pod_selector),
        2, "Container capabilities not updated")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_overwrite_pooler_deployment(self):
self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")

View File

@ -3,6 +3,7 @@ kind: ConfigMap
metadata:
name: postgres-operator
data:
# additional_pod_capabilities: "SYS_NICE"
# additional_secret_mount: "some-secret-name"
# additional_secret_mount_path: "/some/dir"
api_port: "8080"

View File

@ -126,6 +126,10 @@ spec:
kubernetes:
type: object
properties:
additional_pod_capabilities:
type: array
items:
type: string
cluster_domain:
type: string
default: "cluster.local"

View File

@ -26,6 +26,8 @@ configuration:
replication_username: standby
super_username: postgres
kubernetes:
# additional_pod_capabilities:
# - "SYS_NICE"
cluster_domain: cluster.local
cluster_labels:
application: spilo

View File

@ -968,6 +968,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"kubernetes": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"additional_pod_capabilities": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"cluster_domain": {
Type: "string",
},

View File

@ -52,6 +52,7 @@ type KubernetesMetaConfiguration struct {
SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"`
WatchedNamespace string `json:"watched_namespace,omitempty"`
PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`

View File

@ -162,6 +162,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
*out = new(int64)
**out = **in
}
if in.AdditionalPodCapabilities != nil {
in, out := &in.AdditionalPodCapabilities, &out.AdditionalPodCapabilities
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EnablePodDisruptionBudget != nil {
in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget
*out = new(bool)

View File

@ -320,6 +320,19 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca
return
}
// generateCapabilities converts the configured capability names into a
// Kubernetes Capabilities struct for the container's SecurityContext.
// Names are upper-cased, since Linux capability names are upper-case by
// convention (e.g. SYS_NICE). A nil or empty input yields an empty struct.
func generateCapabilities(capabilities []string) v1.Capabilities {
	// bug fix: the previous check was `len(capabilities) > 1`, which
	// silently dropped a configuration with exactly one capability
	if len(capabilities) > 0 {
		additionalCapabilities := make([]v1.Capability, 0, len(capabilities))
		for _, capability := range capabilities {
			additionalCapabilities = append(additionalCapabilities, v1.Capability(strings.ToUpper(capability)))
		}
		return v1.Capabilities{
			Add: additionalCapabilities,
		}
	}
	return v1.Capabilities{}
}
func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
return nil
@ -430,6 +443,7 @@ func generateContainer(
envVars []v1.EnvVar,
volumeMounts []v1.VolumeMount,
privilegedMode bool,
additionalPodCapabilities v1.Capabilities,
) *v1.Container {
return &v1.Container{
Name: name,
@ -456,6 +470,7 @@ func generateContainer(
AllowPrivilegeEscalation: &privilegedMode,
Privileged: &privilegedMode,
ReadOnlyRootFilesystem: util.False(),
Capabilities: &additionalPodCapabilities,
},
}
}
@ -1148,6 +1163,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
volumeMounts,
c.OpConfig.Resources.SpiloPrivileged,
generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
)
// generate container specs for sidecars specified in the cluster manifest
@ -1901,6 +1917,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
envVars,
[]v1.VolumeMount{},
c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
v1.Capabilities{},
)
labels := map[string]string{

View File

@ -1489,3 +1489,42 @@ func TestGenerateService(t *testing.T) {
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
}
// TestGenerateCapabilities checks the translation of configured capability
// names into the container SecurityContext's Capabilities struct.
// The unused `err` field was removed: generateCapabilities returns no error,
// and the field carried misleading messages that were never asserted.
func TestGenerateCapabilities(t *testing.T) {
	testName := "TestGenerateCapabilities"
	tests := []struct {
		subTest      string
		configured   []string
		capabilities v1.Capabilities
	}{
		{
			subTest:      "no capabilities",
			configured:   nil,
			capabilities: v1.Capabilities{},
		},
		{
			subTest:      "empty capabilities",
			configured:   []string{},
			capabilities: v1.Capabilities{},
		},
		{
			subTest:    "configured capabilities",
			configured: []string{"SYS_NICE", "CHOWN"},
			capabilities: v1.Capabilities{
				Add: []v1.Capability{"SYS_NICE", "CHOWN"},
			},
		},
	}
	for _, tt := range tests {
		caps := generateCapabilities(tt.configured)
		if !reflect.DeepEqual(caps, tt.capabilities) {
			t.Errorf("%s %s: expected `%v` but got `%v`",
				testName, tt.subTest, tt.capabilities, caps)
		}
	}
}

View File

@ -66,6 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
result.AdditionalPodCapabilities = fromCRD.Kubernetes.AdditionalPodCapabilities
result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat

View File

@ -34,6 +34,7 @@ type Resources struct {
PodPriorityClassName string `name:"pod_priority_class_name"`
ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
SpiloPrivileged bool `name:"spilo_privileged" default:"false"`
AdditionalPodCapabilities []string `name:"additional_pod_capabilities" default:""`
ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
InheritedLabels []string `name:"inherited_labels" default:""`
InheritedAnnotations []string `name:"inherited_annotations" default:""`