Cover ARC upgrade in E2E test (#1592)

* Cover ARC upgrade in E2E test

so that we can make extra sure that an existing ARC installation can be upgraded to the next version, and that the upgrade is (hopefully) backward-compatible — or at least does not break immediately after upgrading.

* Consolidate E2E tests for RS and RD

* Fix E2E for RD to pass

* Add some comment in E2E for how to release disk consumed after dozens of test runs
This commit is contained in:
Yusuke Kuoka 2022-07-01 21:32:05 +09:00 committed by GitHub
parent d62c8a4697
commit 4446ba57e1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 296 additions and 168 deletions

View File

@ -76,56 +76,3 @@ kubectl -n actions-runner-system wait deploy/actions-runner-controller --for con
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready # Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20 sleep 20
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
if [ -n "${TEST_REPO}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl apply -f -
else
echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl apply -f -
fi
else
echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
fi
if [ -n "${TEST_ORG}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl apply -f -
else
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl apply -f -
fi
if [ -n "${TEST_ORG_GROUP}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl apply -f -
else
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl apply -f -
fi
else
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ORG_GROUP to deploy.'
fi
else
echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
fi
if [ -n "${TEST_ENTERPRISE}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl apply -f -
else
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl apply -f -
fi
if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
if [ "${USE_RUNNERSET}" != "false" ]; then
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl apply -f -
else
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl apply -f -
fi
else
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to deploy.'
fi
else
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE to deploy.'
fi

58
acceptance/deploy_runners.sh Executable file
View File

@ -0,0 +1,58 @@
#!/usr/bin/env bash
# deploy_runners.sh: runs `kubectl ${OP}` (apply or delete) on the E2E test
# runner manifests — RunnerSets, or RunnerDeployments plus HRAs — for each of
# the repository, organization, organization-group, enterprise, and
# enterprise-group scopes, depending on which TEST_* variables are non-empty.
#
# Inputs (environment):
#   OP                     kubectl subcommand: "apply" (default) or "delete"
#   USE_RUNNERSET          anything but "false" deploys RunnerSets; "false"
#                          deploys RunnerDeployments and HRAs instead
#   TEST_REPO              "yourorg/yourrepo" to deploy repo-scoped runners
#   TEST_ORG               org name to deploy org-scoped runners
#   TEST_ORG_GROUP         runner group for org runners (requires TEST_ORG)
#   TEST_ENTERPRISE        enterprise slug for enterprise-scoped runners
#   TEST_ENTERPRISE_GROUP  runner group for enterprise runners
#   REPO_/ORG_/ENTERPRISE_RUNNER_MIN_REPLICAS  minReplicas per scope,
#                          substituted into the templates by envsubst
set -e

OP=${OP:-apply}
# NOTE(review): RUNNER_LABEL is defaulted here but not exported, so the
# default reaches envsubst only if the caller already exported it — confirm.
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

# Repository-scoped runners. The unused scope variables are blanked on the
# envsubst invocation so the template renders only the repo-scoped resources.
if [ -n "${TEST_REPO}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
  else
    echo "Running ${OP} runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
  fi
else
  # Fix: the inner quotes were unescaped and terminated the string early;
  # escape them so the example repo name prints quoted as intended.
  echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to \"yourorg/yourrepo\" to deploy."
fi

# Organization-scoped runners, plus the optional org runner group.
if [ -n "${TEST_ORG}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ORG_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    # Fix: this message previously said "enterprise" (copy-paste from the
    # TEST_ENTERPRISE_GROUP branch below); it guards TEST_ORG_GROUP.
    echo "Skipped ${OP} on organization group runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
fi

# Enterprise-scoped runners, plus the optional enterprise runner group.
if [ -n "${TEST_ENTERPRISE}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
fi

View File

@ -13,6 +13,13 @@ import (
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
) )
type DeployKind int
const (
RunnerSets DeployKind = iota
RunnerDeployments
)
var ( var (
controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller" controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
controllerImageTag = "e2e" controllerImageTag = "e2e"
@ -72,8 +79,6 @@ var (
commonScriptEnv = []string{ commonScriptEnv = []string{
"SYNC_PERIOD=" + "30s", "SYNC_PERIOD=" + "30s",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_TAG=" + runnerImageTag, "RUNNER_TAG=" + runnerImageTag,
} }
@ -101,13 +106,27 @@ var (
// whenever the whole test failed, so that you can immediately start fixing issues and rerun individual tests. // whenever the whole test failed, so that you can immediately start fixing issues and rerun individual tests.
// See the below link for how terratest handles this: // See the below link for how terratest handles this:
// https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/ // https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
//
// This functions leaves PVs undeleted. To delete PVs, run:
// kubectl get pv -ojson | jq -rMc '.items[] | select(.status.phase == "Available") | {name:.metadata.name, status:.status.phase} | .name' | xargs kubectl delete pv
//
// If your disk is full after dozens of test runs, try:
// docker system prune
// and
// kind delete cluster --name teste2e
//
// The former tends to release 200MB-3GB, and the latter can release around 100GB, because the kind node contains loaded container images and
// (if you use it) the local provisioner's disk image (which is implemented as a directory within the kind node).
func TestE2E(t *testing.T) { func TestE2E(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("Skipped as -short is set") t.Skip("Skipped as -short is set")
} }
skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != ""
retainCluster := os.Getenv("ARC_E2E_RETAIN_CLUSTER") != ""
skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != ""
env := initTestEnv(t) env := initTestEnv(t)
env.useRunnerSet = true
t.Run("build and load images", func(t *testing.T) { t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t) env.buildAndLoadImages(t)
@ -121,8 +140,37 @@ func TestE2E(t *testing.T) {
return return
} }
t.Run("install actions-runner-controller and runners", func(t *testing.T) { t.Run("RunnerSets", func(t *testing.T) {
env.installActionsRunnerController(t) var (
testID string
)
t.Run("get or generate test ID", func(t *testing.T) {
testID = env.GetOrGenerateTestID(t)
})
if !skipTestIDCleanUp {
t.Cleanup(func() {
env.DeleteTestID(t)
})
}
t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
})
t.Run("deploy runners", func(t *testing.T) {
env.deploy(t, RunnerSets, testID)
})
if !skipRunnerCleanUp {
t.Cleanup(func() {
env.undeploy(t, RunnerSets, testID)
})
}
t.Run("install edge actions-runner-controller", func(t *testing.T) {
env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
}) })
if t.Failed() { if t.Failed() {
@ -130,7 +178,7 @@ func TestE2E(t *testing.T) {
} }
t.Run("Install workflow", func(t *testing.T) { t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t) env.installActionsWorkflow(t, RunnerSets, testID)
}) })
if t.Failed() { if t.Failed() {
@ -138,36 +186,41 @@ func TestE2E(t *testing.T) {
} }
t.Run("Verify workflow run result", func(t *testing.T) { t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t) env.verifyActionsWorkflowRun(t, testID)
})
}) })
if os.Getenv("ARC_E2E_NO_CLEANUP") != "" { t.Run("RunnerDeployments", func(t *testing.T) {
t.FailNow() var (
} testID string
} )
func TestE2ERunnerDeploy(t *testing.T) { t.Run("get or generate test ID", func(t *testing.T) {
if testing.Short() { testID = env.GetOrGenerateTestID(t)
t.Skip("Skipped as -short is set")
}
env := initTestEnv(t)
env.useApp = true
t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t)
}) })
t.Run("install cert-manager", func(t *testing.T) { if !skipTestIDCleanUp {
env.installCertManager(t) t.Cleanup(func() {
env.DeleteTestID(t)
}) })
if t.Failed() {
return
} }
t.Run("install actions-runner-controller and runners", func(t *testing.T) { t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
env.installActionsRunnerController(t) env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
})
t.Run("deploy runners", func(t *testing.T) {
env.deploy(t, RunnerDeployments, testID)
})
if !skipRunnerCleanUp {
t.Cleanup(func() {
env.undeploy(t, RunnerDeployments, testID)
})
}
t.Run("install edge actions-runner-controller", func(t *testing.T) {
env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
}) })
if t.Failed() { if t.Failed() {
@ -175,7 +228,7 @@ func TestE2ERunnerDeploy(t *testing.T) {
} }
t.Run("Install workflow", func(t *testing.T) { t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t) env.installActionsWorkflow(t, RunnerDeployments, testID)
}) })
if t.Failed() { if t.Failed() {
@ -183,10 +236,11 @@ func TestE2ERunnerDeploy(t *testing.T) {
} }
t.Run("Verify workflow run result", func(t *testing.T) { t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t) env.verifyActionsWorkflowRun(t, testID)
})
}) })
if os.Getenv("ARC_E2E_NO_CLEANUP") != "" { if retainCluster {
t.FailNow() t.FailNow()
} }
} }
@ -194,23 +248,20 @@ func TestE2ERunnerDeploy(t *testing.T) {
type env struct { type env struct {
*testing.Env *testing.Env
useRunnerSet bool
// Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY // Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
// to let ARC authenticate as a GitHub App // to let ARC authenticate as a GitHub App
useApp bool useApp bool
testID string
testName string testName string
repoToCommit string repoToCommit string
appID, appInstallationID, appPrivateKeyFile string appID, appInstallationID, appPrivateKeyFile string
runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string githubToken, testRepo, testOrg, testOrgRepo string
githubTokenWebhook string githubTokenWebhook string
testEnterprise string testEnterprise string
testEphemeral string testEphemeral string
scaleDownDelaySecondsAfterScaleOut int64 scaleDownDelaySecondsAfterScaleOut int64
minReplicas int64 minReplicas int64
dockerdWithinRunnerContainer bool dockerdWithinRunnerContainer bool
testJobs []job
} }
func initTestEnv(t *testing.T) *env { func initTestEnv(t *testing.T) *env {
@ -220,15 +271,11 @@ func initTestEnv(t *testing.T) *env {
e := &env{Env: testingEnv} e := &env{Env: testingEnv}
id := e.ID() testName := t.Name()
testName := t.Name() + " " + id
t.Logf("Initializing test with name %s", testName) t.Logf("Initializing test with name %s", testName)
e.testID = id
e.testName = testName e.testName = testName
e.runnerLabel = "test-" + id
e.githubToken = testing.Getenv(t, "GITHUB_TOKEN") e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
e.appID = testing.Getenv(t, "GITHUB_APP_ID") e.appID = testing.Getenv(t, "GITHUB_APP_ID")
e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID") e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID")
@ -240,7 +287,6 @@ func initTestEnv(t *testing.T) *env {
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "") e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "") e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "") e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
e.testJobs = createTestJobs(id, testResultCMNamePrefix, 6)
e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32) e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32) e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
@ -281,7 +327,7 @@ func (e *env) installCertManager(t *testing.T) {
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second)) e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
} }
func (e *env) installActionsRunnerController(t *testing.T) { func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID string) {
t.Helper() t.Helper()
e.createControllerNamespaceAndServiceAccount(t) e.createControllerNamespaceAndServiceAccount(t)
@ -291,25 +337,11 @@ func (e *env) installActionsRunnerController(t *testing.T) {
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm", "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
} }
if e.useRunnerSet {
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
} else {
scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
}
varEnv := []string{ varEnv := []string{
"TEST_ENTERPRISE=" + e.testEnterprise,
"TEST_REPO=" + e.testRepo,
"TEST_ORG=" + e.testOrg,
"TEST_ORG_REPO=" + e.testOrgRepo,
"WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook, "WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook,
"RUNNER_LABEL=" + e.runnerLabel, "TEST_ID=" + testID,
"TEST_ID=" + e.testID, "NAME=" + repo,
"TEST_EPHEMERAL=" + e.testEphemeral, "VERSION=" + tag,
fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
} }
if e.useApp { if e.useApp {
@ -326,6 +358,54 @@ func (e *env) installActionsRunnerController(t *testing.T) {
) )
} }
scriptEnv = append(scriptEnv, varEnv...)
scriptEnv = append(scriptEnv, commonScriptEnv...)
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
}
func (e *env) deploy(t *testing.T, kind DeployKind, testID string) {
t.Helper()
e.do(t, "apply", kind, testID)
}
func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) {
t.Helper()
e.do(t, "delete", kind, testID)
}
func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
t.Helper()
e.createControllerNamespaceAndServiceAccount(t)
scriptEnv := []string{
"KUBECONFIG=" + e.Kubeconfig(),
"OP=" + op,
}
switch kind {
case RunnerSets:
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
case RunnerDeployments:
scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
default:
t.Fatalf("Invalid deploy kind %v", kind)
}
varEnv := []string{
"TEST_ENTERPRISE=" + e.testEnterprise,
"TEST_REPO=" + e.testRepo,
"TEST_ORG=" + e.testOrg,
"TEST_ORG_REPO=" + e.testOrgRepo,
"RUNNER_LABEL=" + e.runnerLabel(testID),
"TEST_EPHEMERAL=" + e.testEphemeral,
fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
}
if e.dockerdWithinRunnerContainer { if e.dockerdWithinRunnerContainer {
varEnv = append(varEnv, varEnv = append(varEnv,
"RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true", "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
@ -341,7 +421,11 @@ func (e *env) installActionsRunnerController(t *testing.T) {
scriptEnv = append(scriptEnv, varEnv...) scriptEnv = append(scriptEnv, varEnv...)
scriptEnv = append(scriptEnv, commonScriptEnv...) scriptEnv = append(scriptEnv, commonScriptEnv...)
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}) e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
}
func (e *env) runnerLabel(testID string) string {
return "test-" + testID
} }
func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) { func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
@ -351,16 +435,20 @@ func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{}) e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
} }
func (e *env) installActionsWorkflow(t *testing.T) { func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) {
t.Helper() t.Helper()
installActionsWorkflow(t, e.testName, e.runnerLabel, testResultCMNamePrefix, e.repoToCommit, e.testJobs) installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID))
} }
func (e *env) verifyActionsWorkflowRun(t *testing.T) { func (e *env) testJobs(testID string) []job {
return createTestJobs(testID, testResultCMNamePrefix, 6)
}
func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) {
t.Helper() t.Helper()
verifyActionsWorkflowRun(t, e.Env, e.testJobs) verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID))
} }
type job struct { type job struct {
@ -383,7 +471,7 @@ func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
const Branch = "main" const Branch = "main"
func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) { func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job) {
t.Helper() t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@ -415,6 +503,14 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
} }
if !kubernetesContainerMode { if !kubernetesContainerMode {
if kind == RunnerDeployments {
steps = append(steps,
testing.Step{
Run: "sudo mkdir -p \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
},
)
}
steps = append(steps, steps = append(steps,
testing.Step{ testing.Step{
// This might be the easiest way to handle permissions without use of securityContext // This might be the easiest way to handle permissions without use of securityContext
@ -579,5 +675,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
} }
return results, err return results, err
}, 3*60*time.Second, 10*time.Second).Should(gomega.Equal(expected)) }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal(expected))
} }

View File

@ -86,6 +86,16 @@ func (k *Kubectl) CreateCMLiterals(ctx context.Context, name string, literals ma
return nil return nil
} }
func (k *Kubectl) DeleteCM(ctx context.Context, name string, cfg KubectlConfig) error {
args := []string{"cm", name}
if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "delete", args, cfg)); err != nil {
return err
}
return nil
}
func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error { func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil { if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
return err return err

View File

@ -32,7 +32,6 @@ type Env struct {
docker *Docker docker *Docker
Kubectl *Kubectl Kubectl *Kubectl
bash *Bash bash *Bash
id string
} }
func Start(t *testing.T, opts ...Option) *Env { func Start(t *testing.T, opts ...Option) *Env {
@ -56,7 +55,11 @@ func Start(t *testing.T, opts ...Option) *Env {
env.bash = bash env.bash = bash
// return &env
}
func (e *Env) GetOrGenerateTestID(t *testing.T) string {
k, kctl := e.kind, e.Kubectl
cmKey := "id" cmKey := "id"
@ -82,13 +85,27 @@ func Start(t *testing.T, opts ...Option) *Env {
} }
} }
env.id = m[cmKey] return m[cmKey]
return &env
} }
func (e *Env) ID() string { func (e *Env) DeleteTestID(t *testing.T) {
return e.id k, kctl := e.kind, e.Kubectl
kubectlEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
}
cmCfg := KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := kctl.DeleteCM(ctx, testInfoName, cmCfg); err != nil {
t.Fatal(err)
}
} }
func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) { func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {