Merge b48e568a39 into a27727f8d0

commit 3b9ceeceef
@@ -354,6 +354,15 @@ spec:
                 type: integer
               spilo_fsgroup:
                 type: integer
+              spilo_runasnonroot:
+                type: boolean
+              spilo_seccompprofile:
+                type: object
+                properties:
+                  localhostProfile:
+                    type: string
+                  type:
+                    type: string
               spilo_privileged:
                 type: boolean
                 default: false
@@ -483,6 +483,15 @@ spec:
                 type: integer
               spiloFSGroup:
                 type: integer
+              spiloRunAsNonRoot:
+                type: boolean
+              spiloSeccompProfile:
+                type: object
+                properties:
+                  localhostProfile:
+                    type: string
+                  type:
+                    type: string
               standby:
                 type: object
                 properties:
@@ -216,6 +216,14 @@ configKubernetes:
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103
+
+  # sets runAsNonRoot in the security context. If this is set you also must set spilo_runasuser.
+  # spilo_runasnonroot: true
+
+  # sets seccompProfile in the security context
+  # spilo_seccompprofile:
+  #   type: Localhost
+  #   localhostProfile: profiles/audit.json
 
   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
   # whether the Spilo container should run with additional permissions other than parent.
@@ -85,6 +85,13 @@ These parameters are grouped directly under the `spec` key in the manifest.
   requires a custom Spilo image. Note the FSGroup of a Pod cannot be changed
   without recreating a new Pod. Optional.
 
+* **spiloRunAsNonRoot**
+  boolean flag to set `runAsNonRoot` in the pod security context. If this is set
+  then `spiloRunAsUser` must also be set. Optional.
+
+* **spiloSeccompProfile**
+  sets the `seccompProfile` in the pod security context. Optional.
+
 * **enableMasterLoadBalancer**
   boolean flag to override the operator defaults (set by the
   `enable_master_load_balancer` parameter) to define whether to enable the load
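Taken together with the CRD changes above, a minimal postgresql manifest using the two new spec fields could look like the sketch below; the cluster name, team ID, user ID and profile path are illustrative values, not prescribed by this change:

    apiVersion: "acid.zalan.do/v1"
    kind: postgresql
    metadata:
      name: acid-minimal-cluster
    spec:
      teamId: "acid"
      numberOfInstances: 1
      volume:
        size: 1Gi
      # runAsNonRoot needs a user to run as, hence spiloRunAsUser is set as well
      spiloRunAsUser: 101
      spiloRunAsNonRoot: true
      # passed through to the pod security context
      spiloSeccompProfile:
        type: Localhost
        localhostProfile: profiles/audit.json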
@@ -509,6 +509,13 @@ configuration they are grouped under the `kubernetes` key.
   non-root process, but requires a custom Spilo image. Note the FSGroup of a Pod
   cannot be changed without recreating a new Pod.
 
+* **spilo_runasnonroot**
+  boolean flag to set `runAsNonRoot` in the Spilo pod security context. If this
+  is set then `spilo_runasuser` must also be set. Optional.
+
+* **spilo_seccompprofile**
+  sets the `seccompProfile` in the Spilo pod security context. Optional.
+
 * **spilo_privileged**
   whether the Spilo container should run in privileged mode. Privileged mode is
   used for AWS volume resizing and not required if you don't need that
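In the CRD-based operator configuration, the same options sit under the `kubernetes` key. A sketch, with illustrative surrounding values that are not part of this change:

    apiVersion: "acid.zalan.do/v1"
    kind: OperatorConfiguration
    metadata:
      name: postgresql-operator-configuration
    configuration:
      kubernetes:
        spilo_runasuser: 101
        spilo_runasnonroot: true
        spilo_seccompprofile:
          type: Localhost
          localhostProfile: profiles/audit.json
        spilo_privileged: false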
@@ -109,6 +109,10 @@ configuration:
     # spilo_runasuser: 101
     # spilo_runasgroup: 103
     # spilo_fsgroup: 103
+    # spilo_runasnonroot: true
+    # spilo_seccompprofile:
+    #   type: Localhost
+    #   localhostProfile: profiles/audit.json
     spilo_privileged: false
     storage_resize_mode: pvc
     # toleration:
@@ -481,6 +481,15 @@ spec:
                 type: integer
               spiloFSGroup:
                 type: integer
+              spiloRunAsNonRoot:
+                type: boolean
+              spiloSeccompProfile:
+                type: object
+                properties:
+                  localhostProfile:
+                    type: string
+                  type:
+                    type: string
               standby:
                 type: object
                 properties:
@@ -751,6 +751,20 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
             "spiloFSGroup": {
                 Type: "integer",
             },
+            "spiloRunAsNonRoot": {
+                Type: "boolean",
+            },
+            "spiloSeccompProfile": {
+                Type: "object",
+                Properties: map[string]apiextv1.JSONSchemaProps{
+                    "localhostProfile": {
+                        Type: "string",
+                    },
+                    "type": {
+                        Type: "string",
+                    },
+                },
+            },
             "standby": {
                 Type: "object",
                 Properties: map[string]apiextv1.JSONSchemaProps{
@@ -1525,6 +1539,20 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
                 "spilo_fsgroup": {
                     Type: "integer",
                 },
+                "spilo_runasnonroot": {
+                    Type: "boolean",
+                },
+                "spilo_seccompprofile": {
+                    Type: "object",
+                    Properties: map[string]apiextv1.JSONSchemaProps{
+                        "localhostProfile": {
+                            Type: "string",
+                        },
+                        "type": {
+                            Type: "string",
+                        },
+                    },
+                },
                 "spilo_privileged": {
                     Type: "boolean",
                 },
@@ -66,6 +66,8 @@ type KubernetesMetaConfiguration struct {
     SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
     SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
     SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
+    SpiloRunAsNonRoot *bool `json:"spilo_runasnonroot,omitempty"`
+    SpiloSeccompProfile *v1.SeccompProfile `json:"spilo_seccompprofile,omitempty"`
     AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"`
     WatchedNamespace string `json:"watched_namespace,omitempty"`
     PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
@@ -43,6 +43,9 @@ type PostgresSpec struct {
     SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
     SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`
+
+    SpiloRunAsNonRoot *bool `json:"spiloRunAsNonRoot,omitempty"`
+    SpiloSeccompProfile *v1.SeccompProfile `json:"spiloSeccompProfile,omitempty"`
 
     // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
     // in that case the var evaluates to nil and the value is taken from the operator config
     EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"`
@@ -183,6 +183,16 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
         *out = new(int64)
         **out = **in
     }
+    if in.SpiloRunAsNonRoot != nil {
+        in, out := &in.SpiloRunAsNonRoot, &out.SpiloRunAsNonRoot
+        *out = new(bool)
+        **out = **in
+    }
+    if in.SpiloSeccompProfile != nil {
+        in, out := &in.SpiloSeccompProfile, &out.SpiloSeccompProfile
+        *out = new(corev1.SeccompProfile)
+        (*in).DeepCopyInto(*out)
+    }
     if in.AdditionalPodCapabilities != nil {
         in, out := &in.AdditionalPodCapabilities, &out.AdditionalPodCapabilities
         *out = make([]string, len(*in))
@@ -688,6 +698,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
         *out = new(int64)
         **out = **in
     }
+    if in.SpiloRunAsNonRoot != nil {
+        in, out := &in.SpiloRunAsNonRoot, &out.SpiloRunAsNonRoot
+        *out = new(bool)
+        **out = **in
+    }
+    if in.SpiloSeccompProfile != nil {
+        in, out := &in.SpiloSeccompProfile, &out.SpiloSeccompProfile
+        *out = new(corev1.SeccompProfile)
+        (*in).DeepCopyInto(*out)
+    }
     if in.EnableMasterLoadBalancer != nil {
         in, out := &in.EnableMasterLoadBalancer, &out.EnableMasterLoadBalancer
         *out = new(bool)
@@ -813,6 +813,8 @@ func (c *Cluster) generatePodTemplate(
     spiloRunAsUser *int64,
     spiloRunAsGroup *int64,
     spiloFSGroup *int64,
+    spiloRunAsNonRoot *bool,
+    spiloSeccompProfile *v1.SeccompProfile,
     nodeAffinity *v1.Affinity,
     schedulerName *string,
     terminateGracePeriod int64,
@@ -845,6 +847,14 @@ func (c *Cluster) generatePodTemplate(
         securityContext.FSGroup = spiloFSGroup
     }
 
+    if spiloRunAsNonRoot != nil {
+        securityContext.RunAsNonRoot = spiloRunAsNonRoot
+    }
+
+    if spiloSeccompProfile != nil {
+        securityContext.SeccompProfile = spiloSeccompProfile
+    }
+
     podSpec := v1.PodSpec{
         ServiceAccountName: podServiceAccountName,
         TerminationGracePeriodSeconds: &terminateGracePeriodSeconds,
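With both options set (for example via the manifest sketch shown earlier), the pod template produced here would carry a security context roughly like the following; the runAsUser value assumes spilo_runasuser: 101 and is only illustrative:

    securityContext:
      runAsUser: 101
      runAsNonRoot: true
      seccompProfile:
        type: Localhost
        localhostProfile: profiles/audit.json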
@@ -1360,6 +1370,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         effectiveFSGroup = spec.SpiloFSGroup
     }
 
+    effectiveRunAsNonRoot := c.OpConfig.Resources.SpiloRunAsNonRoot
+    if spec.SpiloRunAsNonRoot != nil {
+        effectiveRunAsNonRoot = spec.SpiloRunAsNonRoot
+    }
+
+    effectiveSeccompProfile := c.OpConfig.Resources.SpiloSeccompProfile
+    if spec.SpiloSeccompProfile != nil {
+        effectiveSeccompProfile = spec.SpiloSeccompProfile
+    }
+
     volumeMounts := generateVolumeMounts(spec.Volume)
 
     // configure TLS with a custom secret volume
@@ -1480,6 +1500,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         effectiveRunAsUser,
         effectiveRunAsGroup,
         effectiveFSGroup,
+        effectiveRunAsNonRoot,
+        effectiveSeccompProfile,
         c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
         spec.SchedulerName,
         int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
@@ -2368,6 +2390,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
         nil,
         nil,
         nil,
+        nil,
+        nil,
         c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil),
         nil,
         int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
@@ -4169,3 +4169,123 @@ func TestGenerateCapabilities(t *testing.T) {
         }
     }
 }
+
+func TestGenerateSeccompProfile(t *testing.T) {
+    mockClient, _ := newFakeK8sTestClient()
+
+    spiloSeccompProfile := v1.SeccompProfile{
+        Type: "Localhost",
+        LocalhostProfile: k8sutil.StringToPointer("profiles/audit.json"),
+    }
+
+    postgresql := acidv1.Postgresql{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "acid-test-cluster",
+            Namespace: "default",
+        },
+        Spec: acidv1.PostgresSpec{
+            TeamID: "myapp",
+            NumberOfInstances: 1,
+            Resources: &acidv1.Resources{
+                ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+                ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+            },
+            Volume: acidv1.Volume{
+                Size: "1G",
+            },
+        },
+    }
+
+    testCluster := New(
+        Config{
+            OpConfig: config.Config{
+                PodManagementPolicy: "ordered_ready",
+                Resources: config.Resources{
+                    SpiloSeccompProfile: &spiloSeccompProfile,
+                },
+            },
+        }, mockClient, postgresql, logger, eventRecorder)
+
+    // create a statefulset
+    sts, err := testCluster.createStatefulSet()
+    assert.NoError(t, err)
+
+    assert.Equal(t, spiloSeccompProfile.Type, sts.Spec.Template.Spec.SecurityContext.SeccompProfile.Type, "SeccompProfile.Type matches")
+    assert.Equal(t, *spiloSeccompProfile.LocalhostProfile, *sts.Spec.Template.Spec.SecurityContext.SeccompProfile.LocalhostProfile, "SeccompProfile.LocalhostProfile matches")
+}
+
+func TestGenerateEmptySeccompProfile(t *testing.T) {
+    mockClient, _ := newFakeK8sTestClient()
+
+    postgresql := acidv1.Postgresql{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "acid-test-cluster",
+            Namespace: "default",
+        },
+        Spec: acidv1.PostgresSpec{
+            TeamID: "myapp",
+            NumberOfInstances: 1,
+            Resources: &acidv1.Resources{
+                ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+                ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+            },
+            Volume: acidv1.Volume{
+                Size: "1G",
+            },
+        },
+    }
+
+    testCluster := New(
+        Config{
+            OpConfig: config.Config{
+                PodManagementPolicy: "ordered_ready",
+                Resources: config.Resources{},
+            },
+        }, mockClient, postgresql, logger, eventRecorder)
+
+    // create a statefulset
+    sts, err := testCluster.createStatefulSet()
+    assert.NoError(t, err)
+
+    assert.Nil(t, sts.Spec.Template.Spec.SecurityContext.SeccompProfile, "SeccompProfile not set")
+}
+
+func TestGenerateRunAsNonRoot(t *testing.T) {
+    mockClient, _ := newFakeK8sTestClient()
+
+    postgresql := acidv1.Postgresql{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "acid-test-cluster",
+            Namespace: "default",
+        },
+        Spec: acidv1.PostgresSpec{
+            TeamID: "myapp",
+            NumberOfInstances: 1,
+            Resources: &acidv1.Resources{
+                ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+                ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+            },
+            Volume: acidv1.Volume{
+                Size: "1G",
+            },
+        },
+    }
+
+    runAsNonRoot := true
+
+    testCluster := New(
+        Config{
+            OpConfig: config.Config{
+                PodManagementPolicy: "ordered_ready",
+                Resources: config.Resources{
+                    SpiloRunAsNonRoot: &runAsNonRoot,
+                },
+            },
+        }, mockClient, postgresql, logger, eventRecorder)
+
+    // create a statefulset
+    sts, err := testCluster.createStatefulSet()
+    assert.NoError(t, err)
+
+    assert.Equal(t, true, *sts.Spec.Template.Spec.SecurityContext.RunAsNonRoot, "RunAsNonRoot set")
+}
@@ -79,6 +79,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
     result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
     result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
     result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
+    result.SpiloRunAsNonRoot = fromCRD.Kubernetes.SpiloRunAsNonRoot
+    result.SpiloSeccompProfile = fromCRD.Kubernetes.SpiloSeccompProfile
     result.AdditionalPodCapabilities = fromCRD.Kubernetes.AdditionalPodCapabilities
     result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
     result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
@@ -34,6 +34,8 @@ type Resources struct {
     SpiloRunAsUser *int64 `name:"spilo_runasuser"`
     SpiloRunAsGroup *int64 `name:"spilo_runasgroup"`
     SpiloFSGroup *int64 `name:"spilo_fsgroup"`
+    SpiloRunAsNonRoot *bool `name:"spilo_runasnonroot"`
+    SpiloSeccompProfile *v1.SeccompProfile `name:"spilo_seccompprofile"`
     PodPriorityClassName string `name:"pod_priority_class_name"`
     ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
     SpiloPrivileged bool `name:"spilo_privileged" default:"false"`