From 4446ba57e1a21077136eb371795b7fe0acbc72c7 Mon Sep 17 00:00:00 2001
From: Yusuke Kuoka
Date: Fri, 1 Jul 2022 21:32:05 +0900
Subject: [PATCH] Cover ARC upgrade in E2E test (#1592)

* Cover ARC upgrade in E2E test

so that we can be confident that an existing installation of ARC can be
upgraded to the next version and that the upgrade is (hopefully)
backward-compatible, or at least does not break immediately after
upgrading.

* Consolidate E2E tests for RS and RD

* Fix E2E for RD to pass

* Add comments to the E2E test on how to reclaim disk space consumed after dozens of test runs
---
 acceptance/deploy.sh         |  53 ------
 acceptance/deploy_runners.sh |  58 +++++++
 test/e2e/e2e_test.go         | 312 +++++++++++++++++++++++------------
 testing/kubectl.go           |  10 ++
 testing/testing.go           |  31 +++-
 5 files changed, 296 insertions(+), 168 deletions(-)
 create mode 100755 acceptance/deploy_runners.sh

diff --git a/acceptance/deploy.sh b/acceptance/deploy.sh
index fea6002f..effe1383 100755
--- a/acceptance/deploy.sh
+++ b/acceptance/deploy.sh
@@ -76,56 +76,3 @@ kubectl -n actions-runner-system wait deploy/actions-runner-controller --for con
 
 # Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
 sleep 20
-
-RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
-
-if [ -n "${TEST_REPO}" ]; then
-  if [ "${USE_RUNNERSET}" != "false" ]; then
-    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl apply -f -
-  else
-    echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
-    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl apply -f -
-  fi
-else
-  echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
-fi
-
-if [ -n "${TEST_ORG}" ]; then
-  if [ "${USE_RUNNERSET}" != "false" ]; then
-    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl apply -f -
-  else
-    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl apply -f -
-  fi
-
-  if [ -n "${TEST_ORG_GROUP}" ]; then
-    if [ "${USE_RUNNERSET}" != "false" ]; then
-      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl apply -f -
-    else
-      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl apply -f -
-    fi
-  else
-    echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ORG_GROUP to deploy.'
-  fi
-else
-  echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
-fi
-
-if [ -n "${TEST_ENTERPRISE}" ]; then
-  if [ "${USE_RUNNERSET}" != "false" ]; then
-    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl apply -f -
-  else
-    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl apply -f -
-  fi
-
-  if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
-    if [ "${USE_RUNNERSET}" != "false" ]; then
-      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl apply -f -
-    else
-      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl apply -f -
-    fi
-  else
-    echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to deploy.'
-  fi
-else
-  echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE to deploy.'
-fi
diff --git a/acceptance/deploy_runners.sh b/acceptance/deploy_runners.sh
new file mode 100755
index 00000000..0c0ecfbe
--- /dev/null
+++ b/acceptance/deploy_runners.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+
+OP=${OP:-apply}
+
+RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
+
+if [ -n "${TEST_REPO}" ]; then
+  if [ "${USE_RUNNERSET}" != "false" ]; then
+    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
+  else
+    echo "Running ${OP} for runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
+    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
+  fi
+else
+  echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to \"yourorg/yourrepo\" to deploy."
+fi
+
+if [ -n "${TEST_ORG}" ]; then
+  if [ "${USE_RUNNERSET}" != "false" ]; then
+    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
+  else
+    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
+  fi
+
+  if [ -n "${TEST_ORG_GROUP}" ]; then
+    if [ "${USE_RUNNERSET}" != "false" ]; then
+      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
+    else
+      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
+    fi
+  else
+    echo "Skipped ${OP} on organization runner group runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
+  fi
+else
+  echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
+fi
+
+if [ -n "${TEST_ENTERPRISE}" ]; then
+  if [ "${USE_RUNNERSET}" != "false" ]; then
+    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
+  else
+    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
+  fi
+
+  if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
+    if [ "${USE_RUNNERSET}" != "false" ]; then
+      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
+    else
+      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
+    fi
+  else
+    echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
+  fi
+else
+  echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
+fi
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 8f6ca8f4..a2d17634 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -13,6 +13,13 @@ import (
 	"sigs.k8s.io/yaml"
 )
 
+type DeployKind int
+
+const (
+	RunnerSets DeployKind = iota
+	RunnerDeployments
+)
+
 var (
 	controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
 	controllerImageTag  = "e2e"
@@ -72,8 +79,6 @@ var (
 
 	commonScriptEnv = []string{
 		"SYNC_PERIOD=" + "30s",
-		"NAME=" + controllerImageRepo,
-		"VERSION=" + controllerImageTag,
 		"RUNNER_TAG=" + runnerImageTag,
 	}
 
@@ -101,13 +106,27 @@ var (
 // whenever the whole test failed, so that you can immediately start fixing issues and rerun individual tests.
 // See the below link for how terratest handles this:
 // https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
+//
+// This function leaves PVs undeleted. To delete PVs, run:
+// kubectl get pv -ojson | jq -rMc '.items[] | select(.status.phase == "Available") | {name:.metadata.name, status:.status.phase} | .name' | xargs kubectl delete pv
+//
+// If your disk fills up after dozens of test runs, try:
+// docker system prune
+// and
+// kind delete cluster --name teste2e
+//
+// The former tends to release 200MB-3GB, and the latter can release around 100GB, because the kind node contains loaded container images and
+// (if you use it) the local provisioner's disk image (which is implemented as a directory within the kind node).
func TestE2E(t *testing.T) { if testing.Short() { t.Skip("Skipped as -short is set") } + skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != "" + retainCluster := os.Getenv("ARC_E2E_RETAIN_CLUSTER") != "" + skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != "" + env := initTestEnv(t) - env.useRunnerSet = true t.Run("build and load images", func(t *testing.T) { env.buildAndLoadImages(t) @@ -121,72 +140,107 @@ func TestE2E(t *testing.T) { return } - t.Run("install actions-runner-controller and runners", func(t *testing.T) { - env.installActionsRunnerController(t) + t.Run("RunnerSets", func(t *testing.T) { + var ( + testID string + ) + + t.Run("get or generate test ID", func(t *testing.T) { + testID = env.GetOrGenerateTestID(t) + }) + + if !skipTestIDCleanUp { + t.Cleanup(func() { + env.DeleteTestID(t) + }) + } + + t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) { + env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID) + }) + + t.Run("deploy runners", func(t *testing.T) { + env.deploy(t, RunnerSets, testID) + }) + + if !skipRunnerCleanUp { + t.Cleanup(func() { + env.undeploy(t, RunnerSets, testID) + }) + } + + t.Run("install edge actions-runner-controller", func(t *testing.T) { + env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID) + }) + + if t.Failed() { + return + } + + t.Run("Install workflow", func(t *testing.T) { + env.installActionsWorkflow(t, RunnerSets, testID) + }) + + if t.Failed() { + return + } + + t.Run("Verify workflow run result", func(t *testing.T) { + env.verifyActionsWorkflowRun(t, testID) + }) }) - if t.Failed() { - return - } + t.Run("RunnerDeployments", func(t *testing.T) { + var ( + testID string + ) - t.Run("Install workflow", func(t *testing.T) { - env.installActionsWorkflow(t) + t.Run("get or generate test ID", func(t *testing.T) { + testID = env.GetOrGenerateTestID(t) + }) + + if !skipTestIDCleanUp { + t.Cleanup(func() { + env.DeleteTestID(t) + }) + } + + t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) { + env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID) + }) + + t.Run("deploy runners", func(t *testing.T) { + env.deploy(t, RunnerDeployments, testID) + }) + + if !skipRunnerCleanUp { + t.Cleanup(func() { + env.undeploy(t, RunnerDeployments, testID) + }) + } + + t.Run("install edge actions-runner-controller", func(t *testing.T) { + env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID) + }) + + if t.Failed() { + return + } + + t.Run("Install workflow", func(t *testing.T) { + env.installActionsWorkflow(t, RunnerDeployments, testID) + }) + + if t.Failed() { + return + } + + t.Run("Verify workflow run result", func(t *testing.T) { + env.verifyActionsWorkflowRun(t, testID) + }) }) - if t.Failed() { - return - } - - t.Run("Verify workflow run result", func(t *testing.T) { - env.verifyActionsWorkflowRun(t) - }) - - if os.Getenv("ARC_E2E_NO_CLEANUP") != "" { - t.FailNow() - } -} - -func TestE2ERunnerDeploy(t *testing.T) { - if testing.Short() { - t.Skip("Skipped as -short is set") - } - - env := initTestEnv(t) - env.useApp = true - - t.Run("build and load images", func(t *testing.T) { - env.buildAndLoadImages(t) - }) - - t.Run("install cert-manager", func(t *testing.T) { - env.installCertManager(t) - }) - - if t.Failed() { - return - } - - t.Run("install actions-runner-controller and runners", func(t *testing.T) { - 
env.installActionsRunnerController(t) - }) - - if t.Failed() { - return - } - - t.Run("Install workflow", func(t *testing.T) { - env.installActionsWorkflow(t) - }) - - if t.Failed() { - return - } - - t.Run("Verify workflow run result", func(t *testing.T) { - env.verifyActionsWorkflowRun(t) - }) - - if os.Getenv("ARC_E2E_NO_CLEANUP") != "" { + if retainCluster { t.FailNow() } } @@ -194,23 +248,20 @@ func TestE2ERunnerDeploy(t *testing.T) { type env struct { *testing.Env - useRunnerSet bool // Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY // to let ARC authenticate as a GitHub App useApp bool - testID string - testName string - repoToCommit string - appID, appInstallationID, appPrivateKeyFile string - runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string - githubTokenWebhook string - testEnterprise string - testEphemeral string - scaleDownDelaySecondsAfterScaleOut int64 - minReplicas int64 - dockerdWithinRunnerContainer bool - testJobs []job + testName string + repoToCommit string + appID, appInstallationID, appPrivateKeyFile string + githubToken, testRepo, testOrg, testOrgRepo string + githubTokenWebhook string + testEnterprise string + testEphemeral string + scaleDownDelaySecondsAfterScaleOut int64 + minReplicas int64 + dockerdWithinRunnerContainer bool } func initTestEnv(t *testing.T) *env { @@ -220,15 +271,11 @@ func initTestEnv(t *testing.T) *env { e := &env{Env: testingEnv} - id := e.ID() - - testName := t.Name() + " " + id + testName := t.Name() t.Logf("Initializing test with name %s", testName) - e.testID = id e.testName = testName - e.runnerLabel = "test-" + id e.githubToken = testing.Getenv(t, "GITHUB_TOKEN") e.appID = testing.Getenv(t, "GITHUB_APP_ID") e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID") @@ -240,7 +287,6 @@ func initTestEnv(t *testing.T) *env { e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "") e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "") e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "") - e.testJobs = createTestJobs(id, testResultCMNamePrefix, 6) e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32) e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32) @@ -281,7 +327,7 @@ func (e *env) installCertManager(t *testing.T) { e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second)) } -func (e *env) installActionsRunnerController(t *testing.T) { +func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID string) { t.Helper() e.createControllerNamespaceAndServiceAccount(t) @@ -291,25 +337,11 @@ func (e *env) installActionsRunnerController(t *testing.T) { "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm", } - if e.useRunnerSet { - scriptEnv = append(scriptEnv, "USE_RUNNERSET=1") - } else { - scriptEnv = append(scriptEnv, "USE_RUNNERSET=false") - } - varEnv := []string{ - "TEST_ENTERPRISE=" + e.testEnterprise, - "TEST_REPO=" + e.testRepo, - "TEST_ORG=" + e.testOrg, - "TEST_ORG_REPO=" + e.testOrgRepo, "WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook, - "RUNNER_LABEL=" + e.runnerLabel, - "TEST_ID=" + e.testID, - "TEST_EPHEMERAL=" + e.testEphemeral, - fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut), - fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas), - fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas), - 
fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas), + "TEST_ID=" + testID, + "NAME=" + repo, + "VERSION=" + tag, } if e.useApp { @@ -326,6 +358,54 @@ func (e *env) installActionsRunnerController(t *testing.T) { ) } + scriptEnv = append(scriptEnv, varEnv...) + scriptEnv = append(scriptEnv, commonScriptEnv...) + + e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}) +} + +func (e *env) deploy(t *testing.T, kind DeployKind, testID string) { + t.Helper() + e.do(t, "apply", kind, testID) +} + +func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) { + t.Helper() + e.do(t, "delete", kind, testID) +} + +func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) { + t.Helper() + + e.createControllerNamespaceAndServiceAccount(t) + + scriptEnv := []string{ + "KUBECONFIG=" + e.Kubeconfig(), + "OP=" + op, + } + + switch kind { + case RunnerSets: + scriptEnv = append(scriptEnv, "USE_RUNNERSET=1") + case RunnerDeployments: + scriptEnv = append(scriptEnv, "USE_RUNNERSET=false") + default: + t.Fatalf("Invalid deploy kind %v", kind) + } + + varEnv := []string{ + "TEST_ENTERPRISE=" + e.testEnterprise, + "TEST_REPO=" + e.testRepo, + "TEST_ORG=" + e.testOrg, + "TEST_ORG_REPO=" + e.testOrgRepo, + "RUNNER_LABEL=" + e.runnerLabel(testID), + "TEST_EPHEMERAL=" + e.testEphemeral, + fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut), + fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas), + fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas), + fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas), + } + if e.dockerdWithinRunnerContainer { varEnv = append(varEnv, "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true", @@ -341,7 +421,11 @@ func (e *env) installActionsRunnerController(t *testing.T) { scriptEnv = append(scriptEnv, varEnv...) scriptEnv = append(scriptEnv, commonScriptEnv...) 
- e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}) + e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}) +} + +func (e *env) runnerLabel(testID string) string { + return "test-" + testID } func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) { @@ -351,16 +435,20 @@ func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) { e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{}) } -func (e *env) installActionsWorkflow(t *testing.T) { +func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) { t.Helper() - installActionsWorkflow(t, e.testName, e.runnerLabel, testResultCMNamePrefix, e.repoToCommit, e.testJobs) + installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID)) } -func (e *env) verifyActionsWorkflowRun(t *testing.T) { +func (e *env) testJobs(testID string) []job { + return createTestJobs(testID, testResultCMNamePrefix, 6) +} + +func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) { t.Helper() - verifyActionsWorkflowRun(t, e.Env, e.testJobs) + verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID)) } type job struct { @@ -383,7 +471,7 @@ func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job { const Branch = "main" -func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) { +func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job) { t.Helper() ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) @@ -415,6 +503,14 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam } if !kubernetesContainerMode { + if kind == RunnerDeployments { + steps = append(steps, + testing.Step{ + Run: "sudo mkdir -p \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"", + }, + ) + } + steps = append(steps, testing.Step{ // This might be the easiest way to handle permissions without use of securityContext @@ -579,5 +675,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) { } return results, err - }, 3*60*time.Second, 10*time.Second).Should(gomega.Equal(expected)) + }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal(expected)) } diff --git a/testing/kubectl.go b/testing/kubectl.go index 4eba2cce..fbbc75c6 100644 --- a/testing/kubectl.go +++ b/testing/kubectl.go @@ -86,6 +86,16 @@ func (k *Kubectl) CreateCMLiterals(ctx context.Context, name string, literals ma return nil } +func (k *Kubectl) DeleteCM(ctx context.Context, name string, cfg KubectlConfig) error { + args := []string{"cm", name} + + if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "delete", args, cfg)); err != nil { + return err + } + + return nil +} + func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error { if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil { return err diff --git a/testing/testing.go b/testing/testing.go index 722a09c6..fea40ee6 100644 --- a/testing/testing.go +++ b/testing/testing.go @@ -32,7 +32,6 @@ type Env struct { docker *Docker Kubectl *Kubectl bash *Bash - id string } func Start(t *testing.T, opts ...Option) *Env { @@ -56,7 +55,11 @@ func Start(t *testing.T, opts ...Option) 
*Env { env.bash = bash - // + return &env +} + +func (e *Env) GetOrGenerateTestID(t *testing.T) string { + k, kctl := e.kind, e.Kubectl cmKey := "id" @@ -82,13 +85,27 @@ func Start(t *testing.T, opts ...Option) *Env { } } - env.id = m[cmKey] - - return &env + return m[cmKey] } -func (e *Env) ID() string { - return e.id +func (e *Env) DeleteTestID(t *testing.T) { + k, kctl := e.kind, e.Kubectl + + kubectlEnv := []string{ + "KUBECONFIG=" + k.Kubeconfig(), + } + + cmCfg := KubectlConfig{ + Env: kubectlEnv, + } + testInfoName := "test-info" + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + if err := kctl.DeleteCM(ctx, testInfoName, cmCfg); err != nil { + t.Fatal(err) + } } func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {
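
Note on running the reworked suite: TestE2E now reads three opt-out variables, ARC_E2E_SKIP_RUNNER_CLEANUP, ARC_E2E_SKIP_TEST_ID_CLEANUP, and ARC_E2E_RETAIN_CLUSTER, in place of the old ARC_E2E_NO_CLEANUP. A minimal local invocation sketch, assuming the remaining GITHUB_*/TEST_* variables consumed via testing.Getenv are exported as before; the credential values and the timeout below are placeholders:

  # Keep the runners, the test-info ConfigMap holding the test ID, and the cluster for debugging
  export ARC_E2E_SKIP_RUNNER_CLEANUP=1
  export ARC_E2E_SKIP_TEST_ID_CLEANUP=1
  export ARC_E2E_RETAIN_CLUSTER=1

  export GITHUB_TOKEN=<token> GITHUB_APP_ID=<id> GITHUB_APP_INSTALLATION_ID=<id>
  export TEST_REPO=yourorg/yourrepo

  # -short skips the test entirely, so do not pass it; the suite needs a generous timeout
  go test -count=1 -v -timeout 90m ./test/e2e/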
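Under the hood, each upgrade subtest amounts to running acceptance/deploy.sh twice with different NAME/VERSION pairs while the runners deployed in between stay in place. A rough manual equivalent, run from the repository root and omitting the token and GitHub App variables that installActionsRunnerController also passes through:

  # 1. Install the last released controller via helm
  ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm NAME=summerwind/actions-runner-controller VERSION=v0.24.1 \
    acceptance/deploy.sh

  # 2. Deploy runners against it (USE_RUNNERSET=false for RunnerDeployments)
  USE_RUNNERSET=1 TEST_REPO=yourorg/yourrepo REPO_RUNNER_MIN_REPLICAS=1 acceptance/deploy_runners.sh

  # 3. Upgrade in place to the locally built edge image; the runners from step 2 are left untouched
  ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm NAME=actionsrunnercontrollere2e/actions-runner-controller VERSION=e2e \
    acceptance/deploy.sh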
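The new acceptance/deploy_runners.sh can also be driven by hand outside the Go test, which is handy when iterating on the runner templates. A sketch, assuming it is run from the repository root with KUBECONFIG pointing at the test cluster; the repository name, label, and replica count are placeholders, and the envsubst templates may require further variables (for example TEST_EPHEMERAL and RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT):

  # Create repository-scoped runners; OP defaults to apply
  TEST_REPO=yourorg/yourrepo USE_RUNNERSET=false REPO_RUNNER_MIN_REPLICAS=1 \
    RUNNER_LABEL=test-manual acceptance/deploy_runners.sh

  # Delete the same runners again by switching OP and keeping every other variable identical
  OP=delete TEST_REPO=yourorg/yourrepo USE_RUNNERSET=false REPO_RUNNER_MIN_REPLICAS=1 \
    RUNNER_LABEL=test-manual acceptance/deploy_runners.sh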