Merge pull request #1758 from actions-runner-controller/fix-e2e

e2e: A bunch of fixes
This commit is contained in:
Yusuke Kuoka 2022-08-27 16:29:56 +09:00 committed by GitHub
commit 623c84fa52
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 208 additions and 102 deletions

View File

@ -6,6 +6,8 @@ OP=${OP:-apply}
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted} RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
if [ -n "${TEST_REPO}" ]; then if [ -n "${TEST_REPO}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f - cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -

View File

@ -20,6 +20,10 @@ rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["secrets"] resources: ["secrets"]
verbs: ["get", "list", "create", "delete"] verbs: ["get", "list", "create", "delete"]
# Needed to report test success by creating a cm from within workflow job step
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete"]
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
@ -33,7 +37,7 @@ rules:
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: runner name: ${RUNNER_SERVICE_ACCOUNT_NAME}
namespace: ${NAMESPACE} namespace: ${NAMESPACE}
--- ---
# To verify it's working, try: # To verify it's working, try:
@ -50,7 +54,7 @@ metadata:
namespace: ${NAMESPACE} namespace: ${NAMESPACE}
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: runner name: ${RUNNER_SERVICE_ACCOUNT_NAME}
namespace: ${NAMESPACE} namespace: ${NAMESPACE}
roleRef: roleRef:
kind: ClusterRole kind: ClusterRole
@ -64,7 +68,7 @@ metadata:
namespace: ${NAMESPACE} namespace: ${NAMESPACE}
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: runner name: ${RUNNER_SERVICE_ACCOUNT_NAME}
namespace: ${NAMESPACE} namespace: ${NAMESPACE}
roleRef: roleRef:
kind: ClusterRole kind: ClusterRole

View File

@ -49,6 +49,10 @@ spec:
labels: labels:
- "${RUNNER_LABEL}" - "${RUNNER_LABEL}"
env:
- name: ROLLING_UPDATE_PHASE
value: "${ROLLING_UPDATE_PHASE}"
# #
# Non-standard working directory # Non-standard working directory
# #
@ -64,6 +68,7 @@ spec:
resources: resources:
requests: requests:
storage: 10Gi storage: 10Gi
serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
--- ---
apiVersion: actions.summerwind.dev/v1alpha1 apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler kind: HorizontalRunnerAutoscaler

View File

@ -112,6 +112,7 @@ spec:
labels: labels:
app: ${NAME} app: ${NAME}
spec: spec:
serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
containers: containers:
- name: runner - name: runner
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
@ -120,6 +121,8 @@ spec:
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}" value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
- name: GOMODCACHE - name: GOMODCACHE
value: "/home/runner/.cache/go-mod" value: "/home/runner/.cache/go-mod"
- name: ROLLING_UPDATE_PHASE
value: "${ROLLING_UPDATE_PHASE}"
# PV-backed runner work dir # PV-backed runner work dir
volumeMounts: volumeMounts:
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode # Comment out the ephemeral work volume if you're going to test the kubernetes container mode
@ -152,19 +155,19 @@ spec:
# https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144 # https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
mountPath: "/opt/hostedtoolcache" mountPath: "/opt/hostedtoolcache"
# Valid only when dockerdWithinRunnerContainer=false # Valid only when dockerdWithinRunnerContainer=false
- name: docker # - name: docker
# PV-backed runner work dir # # PV-backed runner work dir
volumeMounts: # volumeMounts:
- name: work # - name: work
mountPath: /runner/_work # mountPath: /runner/_work
# Cache docker image layers, in case dockerdWithinRunnerContainer=false # # Cache docker image layers, in case dockerdWithinRunnerContainer=false
- name: var-lib-docker # - name: var-lib-docker
mountPath: /var/lib/docker # mountPath: /var/lib/docker
# image: mumoshu/actions-runner-dind:dev # # image: mumoshu/actions-runner-dind:dev
# For buildx cache # # For buildx cache
- name: cache # - name: cache
mountPath: "/home/runner/.cache" # mountPath: "/home/runner/.cache"
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode # Comment out the ephemeral work volume if you're going to test the kubernetes container mode
# volumes: # volumes:
# - name: work # - name: work

View File

@ -75,6 +75,10 @@ func syncPVC(ctx context.Context, c client.Client, log logr.Logger, ns string, p
log.V(2).Info("Reconciling runner PVC") log.V(2).Info("Reconciling runner PVC")
// TODO: Probably we'd better remove PVCs related to a RunnerSet that no longer exists?
// Otherwise, a bunch of continuously recreated StatefulSet
// can leave dangling PVCs forever, which might stress the cluster.
var sts appsv1.StatefulSet var sts appsv1.StatefulSet
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil { if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil {
if !kerrors.IsNotFound(err) { if !kerrors.IsNotFound(err) {

View File

@ -8,6 +8,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings"
"time" "time"
"github.com/actions-runner-controller/actions-runner-controller/testing" "github.com/actions-runner-controller/actions-runner-controller/testing"
@ -25,6 +26,8 @@ const (
) )
var ( var (
// See the below link for maintained versions of cert-manager
// https://cert-manager.io/docs/installation/supported-releases/
certManagerVersion = "v1.8.2" certManagerVersion = "v1.8.2"
images = []testing.ContainerImage{ images = []testing.ContainerImage{
@ -36,6 +39,8 @@ var (
} }
testResultCMNamePrefix = "test-result-" testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.296.0"
) )
// If you're willing to run this test via VS Code "run test" or "debug test", // If you're willing to run this test via VS Code "run test" or "debug test",
@ -119,6 +124,7 @@ func TestE2E(t *testing.T) {
t.Fatalf("Failed to parse duration %q: %v", vt, err) t.Fatalf("Failed to parse duration %q: %v", vt, err)
} }
} }
env.doDockerBuild = os.Getenv("ARC_E2E_DO_DOCKER_BUILD") != ""
t.Run("build and load images", func(t *testing.T) { t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t) env.buildAndLoadImages(t)
@ -210,12 +216,37 @@ func TestE2E(t *testing.T) {
return return
} }
ctx, cancel := context.WithCancel(context.Background())
go func() {
for i := 1; ; i++ {
select {
case _, ok := <-ctx.Done():
if !ok {
t.Logf("Stopping the continuous rolling-update of runners")
}
default:
time.Sleep(60 * time.Second)
t.Run(fmt.Sprintf("update runners attempt %d", i), func(t *testing.T) {
env.deploy(t, RunnerSets, testID, fmt.Sprintf("ROLLING_UPDATE_PHASE=%d", i))
})
}
}
}()
t.Cleanup(func() {
cancel()
})
t.Run("Verify workflow run result", func(t *testing.T) { t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t, testID) env.verifyActionsWorkflowRun(t, testID)
}) })
}) })
t.Run("RunnerDeployments", func(t *testing.T) { t.Run("RunnerDeployments", func(t *testing.T) {
if os.Getenv("ARC_E2E_SKIP_RUNNERDEPLOYMENT") != "" {
t.Skip("RunnerSets test has been skipped due to ARC_E2E_SKIP_RUNNERSETS")
}
var ( var (
testID string testID string
) )
@ -285,6 +316,27 @@ func TestE2E(t *testing.T) {
return return
} }
ctx, cancel := context.WithCancel(context.Background())
go func() {
for i := 1; ; i++ {
select {
case _, ok := <-ctx.Done():
if !ok {
t.Logf("Stopping the continuous rolling-update of runners")
}
default:
time.Sleep(10 * time.Second)
t.Run(fmt.Sprintf("update runners - attempt %d", i), func(t *testing.T) {
env.deploy(t, RunnerDeployments, testID, fmt.Sprintf("ROLLING_UPDATE_PHASE=%d", i))
})
}
}
}()
t.Cleanup(func() {
cancel()
})
t.Run("Verify workflow run result", func(t *testing.T) { t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t, testID) env.verifyActionsWorkflowRun(t, testID)
}) })
@ -315,7 +367,10 @@ type env struct {
minReplicas int64 minReplicas int64
dockerdWithinRunnerContainer bool dockerdWithinRunnerContainer bool
rootlessDocker bool rootlessDocker bool
doDockerBuild bool
containerMode string containerMode string
runnerServiceAccuontName string
runnerNamespace string
remoteKubeconfig string remoteKubeconfig string
imagePullSecretName string imagePullSecretName string
imagePullPolicy string imagePullPolicy string
@ -383,7 +438,7 @@ func buildVars(repo string) vars {
Args: []testing.BuildArg{ Args: []testing.BuildArg{
{ {
Name: "RUNNER_VERSION", Name: "RUNNER_VERSION",
Value: "2.294.0", Value: RunnerVersion,
}, },
}, },
Image: runnerImage, Image: runnerImage,
@ -394,7 +449,7 @@ func buildVars(repo string) vars {
Args: []testing.BuildArg{ Args: []testing.BuildArg{
{ {
Name: "RUNNER_VERSION", Name: "RUNNER_VERSION",
Value: "2.294.0", Value: RunnerVersion,
}, },
}, },
Image: runnerDindImage, Image: runnerDindImage,
@ -405,7 +460,7 @@ func buildVars(repo string) vars {
Args: []testing.BuildArg{ Args: []testing.BuildArg{
{ {
Name: "RUNNER_VERSION", Name: "RUNNER_VERSION",
Value: "2.294.0", Value: RunnerVersion,
}, },
}, },
Image: runnerRootlessDindImage, Image: runnerRootlessDindImage,
@ -444,6 +499,8 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "") e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "") e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "") e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
e.runnerServiceAccuontName = testing.Getenv(t, "TEST_RUNNER_SERVICE_ACCOUNT_NAME", "")
e.runnerNamespace = testing.Getenv(t, "TEST_RUNNER_NAMESPACE", "default")
e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "") e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "") e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
e.vars = vars e.vars = vars
@ -507,9 +564,9 @@ func (e *env) checkGitHubToken(t *testing.T, tok string) error {
c := github.NewClient(&http.Client{Transport: transport}) c := github.NewClient(&http.Client{Transport: transport})
aa, res, err := c.Octocat(context.Background(), "hello") aa, res, err := c.Octocat(context.Background(), "hello")
if err != nil { if err != nil {
b, err := io.ReadAll(res.Body) b, ioerr := io.ReadAll(res.Body)
if err != nil { if ioerr != nil {
t.Logf("%v", err) t.Logf("%v", ioerr)
return err return err
} }
t.Logf(string(b)) t.Logf(string(b))
@ -518,14 +575,42 @@ func (e *env) checkGitHubToken(t *testing.T, tok string) error {
t.Logf("%s", aa) t.Logf("%s", aa)
if _, res, err := c.Actions.CreateRegistrationToken(ctx, e.testOrg, e.testOrgRepo); err != nil { if e.testEnterprise != "" {
b, err := io.ReadAll(res.Body) if _, res, err := c.Enterprise.CreateRegistrationToken(ctx, e.testEnterprise); err != nil {
if err != nil { b, ioerr := io.ReadAll(res.Body)
t.Logf("%v", err) if ioerr != nil {
t.Logf("%v", ioerr)
return err
}
t.Logf(string(b))
return err
}
}
if e.testOrg != "" {
if _, res, err := c.Actions.CreateOrganizationRegistrationToken(ctx, e.testOrg); err != nil {
b, ioerr := io.ReadAll(res.Body)
if ioerr != nil {
t.Logf("%v", ioerr)
return err
}
t.Logf(string(b))
return err
}
}
if e.testRepo != "" {
s := strings.Split(e.testRepo, "/")
owner, repo := s[0], s[1]
if _, res, err := c.Actions.CreateRegistrationToken(ctx, owner, repo); err != nil {
b, ioerr := io.ReadAll(res.Body)
if ioerr != nil {
t.Logf("%v", ioerr)
return err
}
t.Logf(string(b))
return err return err
} }
t.Logf(string(b))
return err
} }
return nil return nil
@ -620,9 +705,9 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}) e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
} }
func (e *env) deploy(t *testing.T, kind DeployKind, testID string) { func (e *env) deploy(t *testing.T, kind DeployKind, testID string, env ...string) {
t.Helper() t.Helper()
e.do(t, "apply", kind, testID) e.do(t, "apply", kind, testID, env...)
} }
func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) { func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) {
@ -630,7 +715,7 @@ func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) {
e.do(t, "delete", kind, testID) e.do(t, "delete", kind, testID)
} }
func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) { func (e *env) do(t *testing.T, op string, kind DeployKind, testID string, env ...string) {
t.Helper() t.Helper()
e.createControllerNamespaceAndServiceAccount(t) e.createControllerNamespaceAndServiceAccount(t)
@ -638,7 +723,10 @@ func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
scriptEnv := []string{ scriptEnv := []string{
"KUBECONFIG=" + e.Kubeconfig, "KUBECONFIG=" + e.Kubeconfig,
"OP=" + op, "OP=" + op,
"RUNNER_NAMESPACE=" + e.runnerNamespace,
"RUNNER_SERVICE_ACCOUNT_NAME=" + e.runnerServiceAccuontName,
} }
scriptEnv = append(scriptEnv, env...)
switch kind { switch kind {
case RunnerSets: case RunnerSets:
@ -730,7 +818,7 @@ func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) { func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) {
t.Helper() t.Helper()
installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID), !e.rootlessDocker) installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID), !e.rootlessDocker, e.doDockerBuild)
} }
func (e *env) testJobs(testID string) []job { func (e *env) testJobs(testID string) []job {
@ -772,7 +860,7 @@ func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
const Branch = "main" const Branch = "main"
// useSudo also implies rootful docker and the use of buildx cache export/import // useSudo also implies rootful docker and the use of buildx cache export/import
func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job, useSudo bool) { func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job, useSudo, doDockerBuild bool) {
t.Helper() t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@ -827,32 +915,30 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
} }
} }
steps = append(steps,
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: sudo + "chmod 777 -R \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\"",
},
)
if useSudo { if useSudo {
steps = append(steps, steps = append(steps,
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: sudo + "chmod 777 -R \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\"",
},
testing.Step{ testing.Step{
Run: sudo + "chmod 777 -R \"/var/lib/docker\"", Run: sudo + "chmod 777 -R \"/var/lib/docker\"",
}, },
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: "ls -lah \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\"",
},
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: "ls -lah \"/var/lib/docker\" || echo ls failed.",
},
) )
} }
steps = append(steps, steps = append(steps,
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: "ls -lah \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\"",
},
testing.Step{
// This might be the easiest way to handle permissions without use of securityContext
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
Run: "ls -lah \"/var/lib/docker\" || echo ls failed.",
},
testing.Step{ testing.Step{
Uses: "actions/setup-go@v3", Uses: "actions/setup-go@v3",
With: &testing.With{ With: &testing.With{
@ -871,75 +957,77 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
}, },
) )
if !kubernetesContainerMode { if doDockerBuild {
setupBuildXActionWith := &testing.With{ if !kubernetesContainerMode {
BuildkitdFlags: "--debug", setupBuildXActionWith := &testing.With{
Endpoint: "mycontext", BuildkitdFlags: "--debug",
// As the consequence of setting `install: false`, it doesn't install buildx as an alias to `docker build` Endpoint: "mycontext",
// so we need to use `docker buildx build` in the next step // As the consequence of setting `install: false`, it doesn't install buildx as an alias to `docker build`
Install: false, // so we need to use `docker buildx build` in the next step
} Install: false,
var dockerBuildCache, dockerfile string }
if useSudo { var dockerBuildCache, dockerfile string
// This needs to be set only when rootful docker mode. if useSudo {
// When rootless, we need to use the `docker` buildx driver, which doesn't support cache export // This needs to be set only when rootful docker mode.
// so we end up with the below error on docker-build: // When rootless, we need to use the `docker` buildx driver, which doesn't support cache export
// error: cache export feature is currently not supported for docker driver. Please switch to a different driver (eg. "docker buildx create --use") // so we end up with the below error on docker-build:
dockerBuildCache = "--cache-from=type=local,src=/home/runner/.cache/buildx " + // error: cache export feature is currently not supported for docker driver. Please switch to a different driver (eg. "docker buildx create --use")
"--cache-to=type=local,dest=/home/runner/.cache/buildx-new,mode=max " dockerBuildCache = "--cache-from=type=local,src=/home/runner/.cache/buildx " +
dockerfile = "Dockerfile" "--cache-to=type=local,dest=/home/runner/.cache/buildx-new,mode=max "
} else { dockerfile = "Dockerfile"
setupBuildXActionWith.Driver = "docker" } else {
dockerfile = "Dockerfile.nocache" setupBuildXActionWith.Driver = "docker"
} dockerfile = "Dockerfile.nocache"
steps = append(steps, }
testing.Step{
// https://github.com/docker/buildx/issues/413#issuecomment-710660155
// To prevent setup-buildx-action from failing with:
// error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
Run: "docker context create mycontext",
},
testing.Step{
Run: "docker context use mycontext",
},
testing.Step{
Name: "Set up Docker Buildx",
Uses: "docker/setup-buildx-action@v1",
With: setupBuildXActionWith,
},
testing.Step{
Run: "docker buildx build --platform=linux/amd64 " +
dockerBuildCache +
fmt.Sprintf("-f %s .", dockerfile),
},
)
if useSudo {
steps = append(steps, steps = append(steps,
testing.Step{ testing.Step{
// https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#local-cache // https://github.com/docker/buildx/issues/413#issuecomment-710660155
// See https://github.com/moby/buildkit/issues/1896 for why this is needed // To prevent setup-buildx-action from failing with:
Run: "rm -rf /home/runner/.cache/buildx && mv /home/runner/.cache/buildx-new /home/runner/.cache/buildx", // error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
Run: "docker context create mycontext",
}, },
testing.Step{ testing.Step{
Run: "ls -lah /home/runner/.cache/*", Run: "docker context use mycontext",
},
testing.Step{
Name: "Set up Docker Buildx",
Uses: "docker/setup-buildx-action@v1",
With: setupBuildXActionWith,
},
testing.Step{
Run: "docker buildx build --platform=linux/amd64 " +
dockerBuildCache +
fmt.Sprintf("-f %s .", dockerfile),
}, },
) )
} }
}
if useSudo {
steps = append(steps, steps = append(steps,
testing.Step{ testing.Step{
Uses: "azure/setup-kubectl@v1", // https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#local-cache
With: &testing.With{ // See https://github.com/moby/buildkit/issues/1896 for why this is needed
Version: "v1.20.2", Run: "rm -rf /home/runner/.cache/buildx && mv /home/runner/.cache/buildx-new /home/runner/.cache/buildx",
},
}, },
testing.Step{ testing.Step{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg), Run: "ls -lah /home/runner/.cache/*",
}, },
) )
} }
steps = append(steps,
testing.Step{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
testing.Step{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
},
)
wf.Jobs[j.name] = testing.Job{ wf.Jobs[j.name] = testing.Job{
RunsOn: runnerLabel, RunsOn: runnerLabel,
Container: container, Container: container,