Revert docker.sock path to /var/run/docker.sock (#2536)

Starting with ARC v0.27.2, we changed the `docker.sock` path from `/var/run/docker.sock` to `/var/run/docker/docker.sock`. That broke some container-based actions because the `docker.sock` path is hard-coded in various places.

Even `actions/runner` seems to use `/var/run/docker.sock` for building container-based actions and for service containers.

Anyway, this fixes that by moving the sock file back to the previous location.

Once this gets merged, users stuck at ARC v0.27.1 — those who previously upgraded to v0.27.2 or v0.27.3 and then reverted back to v0.27.1 due to #2519 — should be able to upgrade to the upcoming v0.27.4.

Resolves #2519
Resolves #2538
This commit is contained in:
Yusuke Kuoka 2023-04-27 13:06:35 +09:00 committed by GitHub
parent 9859bbc7f2
commit 94c089c407
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 118 additions and 43 deletions

View File

@ -102,6 +102,7 @@ if [ "${tool}" == "helm" ]; then
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
--set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
--set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
-f ${VALUES_FILE}
set +v
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`

View File

@ -6,6 +6,10 @@ OP=${OP:-apply}
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
# See https://github.com/actions/actions-runner-controller/issues/2123
kubectl delete secret generic docker-config || :
kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :
cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
if [ -n "${TEST_REPO}" ]; then

View File

@ -95,6 +95,24 @@ spec:
# that part is created by dockerd.
mountPath: /home/runner/.local
readOnly: false
# See https://github.com/actions/actions-runner-controller/issues/2123
# Be sure to omit the "aliases" field from the config.json.
# Otherwise you may encounter nasty errors like:
# $ docker build
# docker: 'buildx' is not a docker command.
# See 'docker --help'
# due to the incompatibility between your host docker config.json and the runner environment.
# That is, your host docker config.json might contain this:
# "aliases": {
# "builder": "buildx"
# }
# And this results in the above error when the runner does not have buildx installed yet.
- name: docker-config
mountPath: /home/runner/.docker/config.json
subPath: config.json
readOnly: true
- name: docker-config-root
mountPath: /home/runner/.docker
volumes:
- name: rootless-dind-work-dir
ephemeral:
@ -105,6 +123,15 @@ spec:
resources:
requests:
storage: 3Gi
- name: docker-config
# Refer to .dockerconfigjson/.docker/config.json
secret:
secretName: docker-config
items:
- key: .dockerconfigjson
path: config.json
- name: docker-config-root
emptyDir: {}
#
# Non-standard working directory

View File

@ -91,7 +91,7 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -155,7 +155,7 @@ func TestNewRunnerPod(t *testing.T) {
},
{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "unix:///run/docker.sock",
},
},
VolumeMounts: []corev1.VolumeMount{
@ -168,8 +168,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
},
ImagePullPolicy: corev1.PullAlways,
@ -180,7 +180,7 @@ func TestNewRunnerPod(t *testing.T) {
Image: "default-docker-image",
Args: []string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--host=unix:///run/docker.sock",
"--group=$(DOCKER_GROUP_GID)",
},
Env: []corev1.EnvVar{
@ -195,8 +195,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
{
Name: "work",
@ -543,7 +543,7 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -562,8 +562,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
}
}),
@ -587,7 +587,7 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -676,7 +676,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -740,7 +740,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "unix:///run/docker.sock",
},
{
Name: "RUNNER_NAME",
@ -761,8 +761,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
},
ImagePullPolicy: corev1.PullAlways,
@ -773,7 +773,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
Image: "default-docker-image",
Args: []string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--host=unix:///run/docker.sock",
"--group=$(DOCKER_GROUP_GID)",
},
Env: []corev1.EnvVar{
@ -788,8 +788,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
{
Name: "work",
@ -1149,8 +1149,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
},
},
@ -1170,7 +1170,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -1186,8 +1186,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
{
Name: "runner",
@ -1219,7 +1219,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,

View File

@ -778,6 +778,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
useRunnerStatusUpdateHook = d.UseRunnerStatusUpdateHook
)
const (
varRunVolumeName = "var-run"
varRunVolumeMountPath = "/run"
)
if containerMode == "kubernetes" {
dockerdInRunner = false
dockerEnabled = false
@ -1020,7 +1025,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
// explicitly invoke `dockerd` to avoid automatic TLS / TCP binding
dockerdContainer.Args = append([]string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--host=unix:///run/docker.sock",
}, dockerdContainer.Args...)
// this must match a GID for the user in the runner image
@ -1054,7 +1059,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
runnerContainer.Env = append(runnerContainer.Env,
corev1.EnvVar{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "unix:///run/docker.sock",
},
)
@ -1071,7 +1076,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
pod.Spec.Volumes = append(pod.Spec.Volumes,
corev1.Volume{
Name: "docker-sock",
Name: varRunVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@ -1090,11 +1095,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
)
}
if ok, _ := volumeMountPresent("docker-sock", runnerContainer.VolumeMounts); !ok {
if ok, _ := volumeMountPresent(varRunVolumeName, runnerContainer.VolumeMounts); !ok {
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
corev1.VolumeMount{
Name: "docker-sock",
MountPath: "/run/docker",
Name: varRunVolumeName,
MountPath: varRunVolumeMountPath,
},
)
}
@ -1108,10 +1113,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
},
}
if p, _ := volumeMountPresent("docker-sock", dockerdContainer.VolumeMounts); !p {
if p, _ := volumeMountPresent(varRunVolumeName, dockerdContainer.VolumeMounts); !p {
dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{
Name: "docker-sock",
MountPath: "/run/docker",
Name: varRunVolumeName,
MountPath: varRunVolumeMountPath,
})
}

View File

@ -31,13 +31,8 @@ var (
// https://cert-manager.io/docs/installation/supported-releases/
certManagerVersion = "v1.8.2"
images = []testing.ContainerImage{
testing.Img("docker", "dind"),
testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
}
arcStableImageRepo = "summerwind/actions-runner-controller"
arcStableImageTag = "v0.25.2"
testResultCMNamePrefix = "test-result-"
@ -105,8 +100,8 @@ func TestE2E(t *testing.T) {
}{
{
label: "stable",
controller: "summerwind/actions-runner-controller",
controllerVer: "v0.25.2",
controller: arcStableImageRepo,
controllerVer: arcStableImageTag,
chart: "actions-runner-controller/actions-runner-controller",
// 0.20.2 accidentally added support for runner-status-update which isn't supported by ARC 0.25.2.
// With some chart values, the controller end up with crashlooping with `flag provided but not defined: -runner-status-update-hook`.
@ -423,6 +418,7 @@ type env struct {
admissionWebhooksTimeout string
imagePullSecretName string
imagePullPolicy string
dindSidecarRepositoryAndTag string
watchNamespace string
vars vars
@ -436,6 +432,8 @@ type vars struct {
runnerDindImageRepo string
runnerRootlessDindImageRepo string
dindSidecarImageRepo, dindSidecarImageTag string
prebuildImages []testing.ContainerImage
builds []testing.DockerBuild
@ -458,6 +456,10 @@ func buildVars(repo, ubuntuVer string) vars {
runnerImage = testing.Img(runnerImageRepo, runnerImageTag)
runnerDindImage = testing.Img(runnerDindImageRepo, runnerImageTag)
runnerRootlessDindImage = testing.Img(runnerRootlessDindImageRepo, runnerImageTag)
dindSidecarImageRepo = "docker"
dindSidecarImageTag = "20.10.23-dind"
dindSidecarImage = testing.Img(dindSidecarImageRepo, dindSidecarImageTag)
)
var vs vars
@ -467,6 +469,9 @@ func buildVars(repo, ubuntuVer string) vars {
vs.runnerRootlessDindImageRepo = runnerRootlessDindImageRepo
vs.runnerImageRepo = runnerImageRepo
vs.dindSidecarImageRepo = dindSidecarImageRepo
vs.dindSidecarImageTag = dindSidecarImageTag
// vs.controllerImage, vs.controllerImageTag
vs.prebuildImages = []testing.ContainerImage{
@ -474,6 +479,7 @@ func buildVars(repo, ubuntuVer string) vars {
runnerImage,
runnerDindImage,
runnerRootlessDindImage,
dindSidecarImage,
}
vs.builds = []testing.DockerBuild{
@ -558,6 +564,8 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
e.admissionWebhooksTimeout = testing.Getenv(t, "ARC_E2E_ADMISSION_WEBHOOKS_TIMEOUT", "")
e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
// This should be the default for Ubuntu 20.04 based runner images
e.dindSidecarRepositoryAndTag = vars.dindSidecarImageRepo + ":" + vars.dindSidecarImageTag
e.vars = vars
if e.remoteKubeconfig != "" {
@ -569,6 +577,17 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
e.watchNamespace = testing.Getenv(t, "TEST_WATCH_NAMESPACE", "")
if e.remoteKubeconfig == "" {
images := []testing.ContainerImage{
testing.Img(vars.dindSidecarImageRepo, vars.dindSidecarImageTag),
testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
// Otherwise kubelet would fail to pull images from DockerHub due to the rate limit:
// Warning Failed 19s kubelet Failed to pull image "summerwind/actions-runner-controller:v0.25.2": rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/summerwind/actions-runner-controller:v0.25.2": failed to copy: httpReadSeeker: failed open: unexpected status code https://registry-1.docker.io/v2/summerwind/actions-runner-controller/manifests/sha256:92faf7e9f7f09a6240cdb5eb82eaf448852bdddf2fb77d0a5669fd8e5062b97b: 429 Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit
testing.Img(arcStableImageRepo, arcStableImageTag),
}
e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
e.Env.Kubeconfig = e.Kind.Kubeconfig()
} else {
@ -750,6 +769,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch
"ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout,
"IMAGE_PULL_SECRET=" + e.imagePullSecretName,
"IMAGE_PULL_POLICY=" + e.imagePullPolicy,
"DIND_SIDECAR_REPOSITORY_AND_TAG=" + e.dindSidecarRepositoryAndTag,
"WATCH_NAMESPACE=" + e.watchNamespace,
}
@ -1156,10 +1176,21 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
With: setupBuildXActionWith,
},
testing.Step{
Run: "docker buildx build --platform=linux/amd64 " +
Run: "docker buildx build --platform=linux/amd64 -t test1 --load " +
dockerBuildCache +
fmt.Sprintf("-f %s .", dockerfile),
},
testing.Step{
Run: "docker run --rm test1",
},
testing.Step{
Uses: "addnab/docker-run-action@v3",
With: &testing.With{
Image: "test1",
Run: "hello",
Shell: "sh",
},
},
)
if useSudo {

View File

@ -55,4 +55,11 @@ type With struct {
// Needs to be "docker" in rootless mode
// https://stackoverflow.com/questions/66142872/how-to-solve-error-with-rootless-docker-in-github-actions-self-hosted-runner-wr
Driver string `json:"driver,omitempty"`
// Image is the image arg passed to docker-run-action
Image string `json:"image,omitempty"`
// Run is the run arg passed to docker-run-action
Run string `json:"run,omitempty"`
// Shell is the shell arg passed to docker-run-action
Shell string `json:"shell,omitempty"`
}