Set user and group in security context (#1083)
* Set user and group in security context
parent d8884a4003
commit d09e418b56
@@ -200,6 +200,10 @@ spec:
                 type: string
               secret_name_template:
                 type: string
+              spilo_runasuser:
+                type: integer
+              spilo_runasgroup:
+                type: integer
               spilo_fsgroup:
                 type: integer
               spilo_privileged:
@@ -374,6 +374,10 @@ spec:
                 items:
                   type: object
                   additionalProperties: true
+              spiloRunAsUser:
+                type: integer
+              spiloRunAsGroup:
+                type: integer
               spiloFSGroup:
                 type: integer
               standby:
@@ -127,6 +127,9 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103
 
@@ -118,6 +118,9 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: "103"
 
@@ -65,6 +65,16 @@ These parameters are grouped directly under the `spec` key in the manifest.
   custom Docker image that overrides the **docker_image** operator parameter.
   It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.
 
+* **spiloRunAsUser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spiloRunAsGroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spiloFSGroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This will override the **spilo_fsgroup**
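The practical effect of the three manifest options above can be shown with a short sketch that is not part of this commit; it only assumes the Kubernetes core/v1 API and reuses the example IDs (101/103) from the sample manifests further down:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // Example IDs borrowed from the sample manifests in this commit.
        runAsUser, runAsGroup, fsGroup := int64(101), int64(103), int64(103)

        // With spiloRunAsUser, spiloRunAsGroup and spiloFSGroup set, the operator
        // is expected to render a pod security context roughly like this one.
        sc := v1.PodSecurityContext{
            RunAsUser:  &runAsUser,  // UID of the Spilo process
            RunAsGroup: &runAsGroup, // GID of the Spilo process
            FSGroup:    &fsGroup,    // group that owns the mounted volumes
        }
        fmt.Printf("%+v\n", sc)
    }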
@@ -317,6 +317,16 @@ configuration they are grouped under the `kubernetes` key.
   that should be assigned to the Postgres pods. The priority class itself must
   be defined in advance. Default is empty (use the default priority class).
 
+* **spilo_runasuser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spilo_runasgroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spilo_fsgroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This is required to run Spilo as a
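The operator-level options above act as defaults; the per-cluster manifest values documented earlier take precedence, which is what the k8sres.go hunk further down implements. A minimal sketch of that precedence, using a hypothetical helper name (effectiveID is not part of the operator code):

    package main

    import "fmt"

    // effectiveID illustrates the precedence applied in this commit: a value from
    // the cluster manifest overrides the operator configuration default; nil means
    // the corresponding security context field stays unset.
    func effectiveID(operatorDefault, clusterValue *int64) *int64 {
        if clusterValue != nil {
            return clusterValue
        }
        return operatorDefault
    }

    func main() {
        opDefault := int64(101)
        perCluster := int64(1000)

        fmt.Println(*effectiveID(&opDefault, nil))         // 101: operator default applies
        fmt.Println(*effectiveID(&opDefault, &perCluster)) // 1000: manifest value wins
    }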
@@ -68,6 +68,8 @@ spec:
   #   name: my-config-map
 
   enableShmVolume: true
+  # spiloRunAsUser: 101
+  # spiloRunAsGroup: 103
   # spiloFSGroup: 103
   # podAnnotations:
   #   annotation.key: value
@@ -99,6 +99,8 @@ data:
   secret_name_template: "{username}.{cluster}.credentials"
   # sidecar_docker_images: ""
   # set_memory_request_to_limit: "false"
+  # spilo_runasuser: 101
+  # spilo_runasgroup: 103
   # spilo_fsgroup: 103
   spilo_privileged: "false"
   # storage_resize_mode: "off"
@@ -196,6 +196,10 @@ spec:
                 type: string
               secret_name_template:
                 type: string
+              spilo_runasuser:
+                type: integer
+              spilo_runasgroup:
+                type: integer
               spilo_fsgroup:
                 type: integer
               spilo_privileged:
@@ -68,6 +68,8 @@ configuration:
     # pod_service_account_role_binding_definition: ""
     pod_terminate_grace_period: 5m
     secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+    # spilo_runasuser: 101
+    # spilo_runasgroup: 103
     # spilo_fsgroup: 103
     spilo_privileged: false
     storage_resize_mode: ebs
@@ -370,6 +370,10 @@ spec:
                 items:
                   type: object
                   additionalProperties: true
+              spiloRunAsUser:
+                type: integer
+              spiloRunAsGroup:
+                type: integer
               spiloFSGroup:
                 type: integer
               standby:
@@ -519,6 +519,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
                },
            },
        },
+       "spiloRunAsUser": {
+           Type: "integer",
+       },
+       "spiloRunAsGroup": {
+           Type: "integer",
+       },
        "spiloFSGroup": {
            Type: "integer",
        },
@@ -1018,6 +1024,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
        "secret_name_template": {
            Type: "string",
        },
+       "spilo_runasuser": {
+           Type: "integer",
+       },
+       "spilo_runasgroup": {
+           Type: "integer",
+       },
        "spilo_fsgroup": {
            Type: "integer",
        },
@@ -49,6 +49,8 @@ type KubernetesMetaConfiguration struct {
    PodServiceAccountRoleBindingDefinition string   `json:"pod_service_account_role_binding_definition,omitempty"`
    PodTerminateGracePeriod                Duration `json:"pod_terminate_grace_period,omitempty"`
    SpiloPrivileged                        bool     `json:"spilo_privileged,omitempty"`
+   SpiloRunAsUser                         *int64   `json:"spilo_runasuser,omitempty"`
+   SpiloRunAsGroup                        *int64   `json:"spilo_runasgroup,omitempty"`
    SpiloFSGroup                           *int64   `json:"spilo_fsgroup,omitempty"`
    WatchedNamespace                       string   `json:"watched_namespace,omitempty"`
    PDBNameFormat                          config.StringTemplate `json:"pdb_name_format,omitempty"`
@@ -35,7 +35,9 @@ type PostgresSpec struct {
    TeamID      string `json:"teamId"`
    DockerImage string `json:"dockerImage,omitempty"`
 
-   SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`
+   SpiloRunAsUser  *int64 `json:"spiloRunAsUser,omitempty"`
+   SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
+   SpiloFSGroup    *int64 `json:"spiloFSGroup,omitempty"`
 
    // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
    // in that case the var evaluates to nil and the value is taken from the operator config
@@ -147,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
    *out = *in
+   if in.SpiloRunAsUser != nil {
+       in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
+       *out = new(int64)
+       **out = **in
+   }
+   if in.SpiloRunAsGroup != nil {
+       in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
+       *out = new(int64)
+       **out = **in
+   }
    if in.SpiloFSGroup != nil {
        in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
        *out = new(int64)
@@ -527,6 +537,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
        *out = new(ConnectionPooler)
        (*in).DeepCopyInto(*out)
    }
+   if in.SpiloRunAsUser != nil {
+       in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
+       *out = new(int64)
+       **out = **in
+   }
+   if in.SpiloRunAsGroup != nil {
+       in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
+       *out = new(int64)
+       **out = **in
+   }
    if in.SpiloFSGroup != nil {
        in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
        *out = new(int64)
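The generated DeepCopyInto blocks above are needed because the new fields are *int64 pointers: a plain struct copy would share the same int64 between the original and the copy. A minimal illustration, not operator code:

    package main

    import "fmt"

    type spec struct {
        SpiloRunAsUser *int64
    }

    func main() {
        uid := int64(101)
        in := spec{SpiloRunAsUser: &uid}

        shallow := in // copies the pointer only

        deep := spec{SpiloRunAsUser: new(int64)}
        *deep.SpiloRunAsUser = *in.SpiloRunAsUser // what the generated code does

        *in.SpiloRunAsUser = 0
        fmt.Println(*shallow.SpiloRunAsUser) // 0: mutated through the shared pointer
        fmt.Println(*deep.SpiloRunAsUser)    // 101: unaffected
    }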
@@ -557,6 +557,8 @@ func (c *Cluster) generatePodTemplate(
    initContainers []v1.Container,
    sidecarContainers []v1.Container,
    tolerationsSpec *[]v1.Toleration,
+   spiloRunAsUser *int64,
+   spiloRunAsGroup *int64,
    spiloFSGroup *int64,
    nodeAffinity *v1.Affinity,
    terminateGracePeriod int64,
@@ -576,6 +578,14 @@ func (c *Cluster) generatePodTemplate(
    containers = append(containers, sidecarContainers...)
    securityContext := v1.PodSecurityContext{}
 
+   if spiloRunAsUser != nil {
+       securityContext.RunAsUser = spiloRunAsUser
+   }
+
+   if spiloRunAsGroup != nil {
+       securityContext.RunAsGroup = spiloRunAsGroup
+   }
+
    if spiloFSGroup != nil {
        securityContext.FSGroup = spiloFSGroup
    }
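For context, the security context built above ends up on the generated pod template. A simplified sketch of that wiring, assuming only the standard core/v1 types (buildPodSpec is a hypothetical reduction; the real generatePodTemplate assembles many more fields):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // buildPodSpec mirrors the pattern above: the optional IDs are only set on the
    // pod security context when they were configured.
    func buildPodSpec(containers []v1.Container, runAsUser, runAsGroup, fsGroup *int64) v1.PodSpec {
        securityContext := v1.PodSecurityContext{}
        if runAsUser != nil {
            securityContext.RunAsUser = runAsUser
        }
        if runAsGroup != nil {
            securityContext.RunAsGroup = runAsGroup
        }
        if fsGroup != nil {
            securityContext.FSGroup = fsGroup
        }
        return v1.PodSpec{
            Containers:      containers,
            SecurityContext: &securityContext,
        }
    }

    func main() {
        uid, gid := int64(101), int64(103)
        spec := buildPodSpec([]v1.Container{{Name: "postgres"}}, &uid, &gid, &gid)
        fmt.Printf("%+v\n", spec.SecurityContext)
    }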
@@ -1073,7 +1083,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
    // pickup the docker image for the spilo container
    effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
 
-   // determine the FSGroup for the spilo pod
+   // determine the User, Group and FSGroup for the spilo pod
+   effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
+   if spec.SpiloRunAsUser != nil {
+       effectiveRunAsUser = spec.SpiloRunAsUser
+   }
+
+   effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
+   if spec.SpiloRunAsGroup != nil {
+       effectiveRunAsGroup = spec.SpiloRunAsGroup
+   }
+
    effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
    if spec.SpiloFSGroup != nil {
        effectiveFSGroup = spec.SpiloFSGroup
@@ -1217,6 +1237,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
        initContainers,
        sidecarContainers,
        &tolerationSpec,
+       effectiveRunAsUser,
+       effectiveRunAsGroup,
        effectiveFSGroup,
        nodeAffinity(c.OpConfig.NodeReadinessLabel),
        int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
@@ -1897,6 +1919,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
        []v1.Container{},
        &[]v1.Toleration{},
+       nil,
+       nil,
        nil,
        nodeAffinity(c.OpConfig.NodeReadinessLabel),
        int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
        c.OpConfig.PodServiceAccountName,
@@ -1302,6 +1302,8 @@ func TestTLS(t *testing.T) {
    var err error
    var spec acidv1.PostgresSpec
    var cluster *Cluster
+   var spiloRunAsUser = int64(101)
+   var spiloRunAsGroup = int64(103)
    var spiloFSGroup = int64(103)
    var additionalVolumes = spec.AdditionalVolumes
 
@@ -1329,7 +1331,9 @@ func TestTLS(t *testing.T) {
            ReplicationUsername: replicationUserName,
        },
        Resources: config.Resources{
-           SpiloFSGroup: &spiloFSGroup,
+           SpiloRunAsUser:  &spiloRunAsUser,
+           SpiloRunAsGroup: &spiloRunAsGroup,
+           SpiloFSGroup:    &spiloFSGroup,
        },
    },
 }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
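A hedged sketch of the kind of assertion the extended test can now make against the generated StatefulSet; assertSecurityContext is a hypothetical helper and not part of this commit:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        v1 "k8s.io/api/core/v1"
    )

    // assertSecurityContext checks that the pod template of a StatefulSet carries
    // the configured UID/GID/FSGroup trio.
    func assertSecurityContext(sts *appsv1.StatefulSet, wantUID, wantGID, wantFSGroup int64) error {
        sc := sts.Spec.Template.Spec.SecurityContext
        if sc == nil {
            return fmt.Errorf("pod template has no security context")
        }
        if sc.RunAsUser == nil || *sc.RunAsUser != wantUID {
            return fmt.Errorf("unexpected RunAsUser: %v", sc.RunAsUser)
        }
        if sc.RunAsGroup == nil || *sc.RunAsGroup != wantGID {
            return fmt.Errorf("unexpected RunAsGroup: %v", sc.RunAsGroup)
        }
        if sc.FSGroup == nil || *sc.FSGroup != wantFSGroup {
            return fmt.Errorf("unexpected FSGroup: %v", sc.FSGroup)
        }
        return nil
    }

    func main() {
        uid, gid := int64(101), int64(103)
        sts := &appsv1.StatefulSet{}
        sts.Spec.Template.Spec.SecurityContext = &v1.PodSecurityContext{
            RunAsUser: &uid, RunAsGroup: &gid, FSGroup: &gid,
        }
        fmt.Println(assertSecurityContext(sts, 101, 103, 103)) // <nil>
    }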
@@ -61,6 +61,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
    result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
    result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
+   result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
+   result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
    result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
    result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
    result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
@@ -28,6 +28,8 @@ type Resources struct {
    PodLabelWaitTimeout     time.Duration `name:"pod_label_wait_timeout" default:"10m"`
    PodDeletionWaitTimeout  time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
    PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
+   SpiloRunAsUser          *int64        `json:"spilo_runasuser,omitempty"`
+   SpiloRunAsGroup         *int64        `json:"spilo_runasgroup,omitempty"`
    SpiloFSGroup            *int64        `name:"spilo_fsgroup"`
    PodPriorityClassName    string        `name:"pod_priority_class_name"`
    ClusterDomain           string        `name:"cluster_domain" default:"cluster.local"`