feat: Support arbitrarily setting `privileged: true` for runner container (#1383)
Resolves #1282
This commit is contained in:
parent
65a67ee61c
commit
3a7e8c844b
23
README.md
23
README.md
|
|
@ -444,6 +444,17 @@ spec:
|
|||
requests:
|
||||
cpu: "2.0"
|
||||
memory: "4Gi"
|
||||
# This is an advanced configuration. Don't touch it unless you know what you're doing.
|
||||
securityContext:
|
||||
# Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
|
||||
# But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
|
||||
# just specify `privileged: true` like this.
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
|
||||
# Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
|
||||
# like firecracker and kata. Basically they run containers within dedicated micro vms and so
|
||||
# it means you can use `privileged: true` more safely with those runtimes.
|
||||
#
|
||||
# privileged: true
|
||||
- name: docker
|
||||
resources:
|
||||
limits:
|
||||
|
|
@ -1138,6 +1149,18 @@ spec:
|
|||
# This must match the name of a RuntimeClass resource available on the cluster.
|
||||
# More info: https://kubernetes.io/docs/concepts/containers/runtime-class
|
||||
runtimeClassName: "runc"
|
||||
# This is an advanced configuration. Don't touch it unless you know what you're doing.
|
||||
containers:
|
||||
- name: runner
|
||||
# Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
|
||||
# But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
|
||||
# just specify `privileged: true` like this.
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
|
||||
# Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
|
||||
# like firecracker and kata. Basically they run containers within dedicated micro vms and so
|
||||
# it means you can use `privileged: true` more safely with those runtimes.
|
||||
#
|
||||
# privileged: true
|
||||
```
|
||||
|
||||
### Custom Volume mounts
|
||||
|
|
|
|||
|
|
@ -388,8 +388,7 @@ func TestNewRunnerPod(t *testing.T) {
|
|||
DockerEnabled: boolPtr(false),
|
||||
},
|
||||
want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
|
||||
// TODO
|
||||
// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
|
@ -880,7 +879,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||
},
|
||||
|
||||
want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
|
||||
// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -349,24 +349,51 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||
if len(runner.Spec.Containers) == 0 {
|
||||
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||
Name: "runner",
|
||||
ImagePullPolicy: runner.Spec.ImagePullPolicy,
|
||||
EnvFrom: runner.Spec.EnvFrom,
|
||||
Env: runner.Spec.Env,
|
||||
Resources: runner.Spec.Resources,
|
||||
})
|
||||
|
||||
if (runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled) && (runner.Spec.DockerdWithinRunnerContainer == nil || !*runner.Spec.DockerdWithinRunnerContainer) {
|
||||
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||
Name: "docker",
|
||||
VolumeMounts: runner.Spec.DockerVolumeMounts,
|
||||
Resources: runner.Spec.DockerdContainerResources,
|
||||
Env: runner.Spec.DockerEnv,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
template.Spec.Containers = runner.Spec.Containers
|
||||
}
|
||||
|
||||
for i, c := range template.Spec.Containers {
|
||||
switch c.Name {
|
||||
case "runner":
|
||||
if c.ImagePullPolicy == "" {
|
||||
template.Spec.Containers[i].ImagePullPolicy = runner.Spec.ImagePullPolicy
|
||||
}
|
||||
if len(c.EnvFrom) == 0 {
|
||||
template.Spec.Containers[i].EnvFrom = runner.Spec.EnvFrom
|
||||
}
|
||||
if len(c.Env) == 0 {
|
||||
template.Spec.Containers[i].Env = runner.Spec.Env
|
||||
}
|
||||
if len(c.Resources.Requests) == 0 {
|
||||
template.Spec.Containers[i].Resources.Requests = runner.Spec.Resources.Requests
|
||||
}
|
||||
if len(c.Resources.Limits) == 0 {
|
||||
template.Spec.Containers[i].Resources.Limits = runner.Spec.Resources.Limits
|
||||
}
|
||||
case "docker":
|
||||
if len(c.VolumeMounts) == 0 {
|
||||
template.Spec.Containers[i].VolumeMounts = runner.Spec.DockerVolumeMounts
|
||||
}
|
||||
if len(c.Resources.Limits) == 0 {
|
||||
template.Spec.Containers[i].Resources.Limits = runner.Spec.DockerdContainerResources.Limits
|
||||
}
|
||||
if len(c.Resources.Requests) == 0 {
|
||||
template.Spec.Containers[i].Resources.Requests = runner.Spec.DockerdContainerResources.Requests
|
||||
}
|
||||
if len(c.Env) == 0 {
|
||||
template.Spec.Containers[i].Env = runner.Spec.DockerEnv
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template.Spec.SecurityContext = runner.Spec.SecurityContext
|
||||
template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
|
||||
|
||||
|
|
@ -623,8 +650,11 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||
if runnerContainer.SecurityContext == nil {
|
||||
runnerContainer.SecurityContext = &corev1.SecurityContext{}
|
||||
}
|
||||
|
||||
if runnerContainer.SecurityContext.Privileged == nil {
|
||||
// Runner need to run privileged if it contains DinD
|
||||
runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged
|
||||
}
|
||||
|
||||
pod := template.DeepCopy()
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue