e2e: Cover RunnerDeployment (#668)

Previously the E2E test suite covered only RunnerSet. This refactors the existing E2E test code to extract the common test structure into a `env` struct and its methods, and use it to write two very similar tests, one for RunnerSet and another for RunnerDeployment.
This commit is contained in:
Yusuke Kuoka 2021-06-29 17:52:43 +09:00 committed by GitHub
parent 4ec57d3e39
commit c78116b0f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 792 additions and 474 deletions

View File

@ -211,10 +211,11 @@ acceptance/deploy:
acceptance/tests: acceptance/tests:
acceptance/checks.sh acceptance/checks.sh
# We use -count=1 instead of `go clean -testcache`
# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
.PHONY: e2e .PHONY: e2e
e2e: e2e:
go clean -testcache go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
go test -v -timeout 600s -run '^TestE2E$$' ./test/e2e
# Upload release file to GitHub. # Upload release file to GitHub.
github-release: release github-release: release

View File

@ -3,8 +3,6 @@ package e2e
import ( import (
"context" "context"
"fmt" "fmt"
"math/rand"
"os"
"path/filepath" "path/filepath"
"time" "time"
@ -14,19 +12,12 @@ import (
) )
var ( var (
Img = func(repo, tag string) testing.ContainerImage {
return testing.ContainerImage{
Repo: repo,
Tag: tag,
}
}
controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller" controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
controllerImageTag = "e2e" controllerImageTag = "e2e"
controllerImage = Img(controllerImageRepo, controllerImageTag) controllerImage = testing.Img(controllerImageRepo, controllerImageTag)
runnerImageRepo = "actionsrunnercontrollere2e/actions-runner" runnerImageRepo = "actionsrunnercontrollere2e/actions-runner"
runnerImageTag = "e2e" runnerImageTag = "e2e"
runnerImage = Img(runnerImageRepo, runnerImageTag) runnerImage = testing.Img(runnerImageRepo, runnerImageTag)
prebuildImages = []testing.ContainerImage{ prebuildImages = []testing.ContainerImage{
controllerImage, controllerImage,
@ -49,12 +40,22 @@ var (
certManagerVersion = "v1.1.1" certManagerVersion = "v1.1.1"
images = []testing.ContainerImage{ images = []testing.ContainerImage{
Img("docker", "dind"), testing.Img("docker", "dind"),
Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"), testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
Img("quay.io/jetstack/cert-manager-controller", certManagerVersion), testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion), testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion), testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
} }
commonScriptEnv = []string{
"SYNC_PERIOD=" + "10s",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
}
testResultCMNamePrefix = "test-result-"
) )
// If you're willing to run this test via VS Code "run test" or "debug test", // If you're willing to run this test via VS Code "run test" or "debug test",
@ -71,141 +72,208 @@ var (
// This function requires a few environment variables to be set to provide some test data. // This function requires a few environment variables to be set to provide some test data.
// If you're using VS Code and wanting to run this test locally, // If you're using VS Code and wanting to run this test locally,
// Browse "Workspace Settings" and search for "go test env file" and put e.g. "${workspaceFolder}/.test.env" there. // Browse "Workspace Settings" and search for "go test env file" and put e.g. "${workspaceFolder}/.test.env" there.
//
// Instead of relying on "stages" to make it possible to rerun individual tests like terratest,
// you use the "run subtest" feature provided by IDE like VS Code, IDEA, and GoLand.
// Our `testing` package automatically checks for the running test name and skips the cleanup tasks
// whenever the whole test failed, so that you can immediately start fixing issues and rerun individual tests.
// See the below link for how terratest handles this:
// https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
func TestE2E(t *testing.T) { func TestE2E(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("Skipped as -short is set") t.Skip("Skipped as -short is set")
} }
k := testing.Start(t, testing.Cluster{}, testing.Preload(images...)) env := initTestEnv(t)
env.useRunnerSet = true
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) t.Run("build and load images", func(t *testing.T) {
defer cancel() env.buildAndLoadImages(t)
t.Run("build images", func(t *testing.T) {
if err := k.BuildImages(ctx, builds); err != nil {
t.Fatal(err)
}
}) })
t.Run("load images", func(t *testing.T) {
if err := k.LoadImages(ctx, prebuildImages); err != nil {
t.Fatal(err)
}
})
kubectlEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
}
t.Run("install cert-manager", func(t *testing.T) { t.Run("install cert-manager", func(t *testing.T) {
applyCfg := testing.KubectlConfig{NoValidate: true, Env: kubectlEnv} env.installCertManager(t)
if err := k.Apply(ctx, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certManagerVersion), applyCfg); err != nil {
t.Fatal(err)
}
waitCfg := testing.KubectlConfig{
Env: kubectlEnv,
Namespace: "cert-manager",
Timeout: 90 * time.Second,
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-cainjector", waitCfg); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-webhook", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
if err := k.RunKubectlEnsureNS(ctx, "actions-runner-system", testing.KubectlConfig{Env: kubectlEnv}); err != nil {
t.Fatal(err)
}
}) })
t.Run("make default serviceaccount cluster-admin", func(t *testing.T) {
cfg := testing.KubectlConfig{Env: kubectlEnv}
bindingName := "default-admin"
if _, err := k.GetClusterRoleBinding(ctx, bindingName, cfg); err != nil {
if err := k.CreateClusterRoleBindingServiceAccount(ctx, bindingName, "cluster-admin", "default:default", cfg); err != nil {
t.Fatal(err)
}
}
})
cmCfg := testing.KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
m, _ := k.GetCMLiterals(ctx, testInfoName, cmCfg)
t.Run("Save test ID", func(t *testing.T) {
if m == nil {
id := RandStringBytesRmndr(10)
m = map[string]string{"id": id}
if err := k.CreateCMLiterals(ctx, testInfoName, m, cmCfg); err != nil {
t.Fatal(err)
}
}
})
id := m["id"]
runnerLabel := "test-" + id
testID := t.Name() + " " + id
t.Logf("Using test id %s", testID)
githubToken := getenv(t, "GITHUB_TOKEN")
testRepo := getenv(t, "TEST_REPO")
testOrg := getenv(t, "TEST_ORG")
testOrgRepo := getenv(t, "TEST_ORG_REPO")
if t.Failed() { if t.Failed() {
return return
} }
t.Run("install actions-runner-controller and runners", func(t *testing.T) { t.Run("install actions-runner-controller and runners", func(t *testing.T) {
scriptEnv := []string{ env.installActionsRunnerController(t)
"KUBECONFIG=" + k.Kubeconfig(),
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
"TEST_REPO=" + testRepo,
"TEST_ORG=" + testOrg,
"TEST_ORG_REPO=" + testOrgRepo,
"SYNC_PERIOD=" + "10s",
"USE_RUNNERSET=" + "1",
"GITHUB_TOKEN=" + githubToken,
"RUNNER_LABEL=" + runnerLabel,
}
if err := k.RunScript(ctx, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}); err != nil {
t.Fatal(err)
}
}) })
testResultCMNamePrefix := "test-result-"
if t.Failed() { if t.Failed() {
return return
} }
numJobs := 2 t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t)
})
type job struct { if t.Failed() {
name, testArg, configMapName string return
} }
t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t)
})
}
func TestE2ERunnerDeploy(t *testing.T) {
if testing.Short() {
t.Skip("Skipped as -short is set")
}
env := initTestEnv(t)
t.Run("build and load images", func(t *testing.T) {
env.buildAndLoadImages(t)
})
t.Run("install cert-manager", func(t *testing.T) {
env.installCertManager(t)
})
if t.Failed() {
return
}
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
env.installActionsRunnerController(t)
})
if t.Failed() {
return
}
t.Run("Install workflow", func(t *testing.T) {
env.installActionsWorkflow(t)
})
if t.Failed() {
return
}
t.Run("Verify workflow run result", func(t *testing.T) {
env.verifyActionsWorkflowRun(t)
})
}
type env struct {
*testing.Env
useRunnerSet bool
testID string
runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string
testJobs []job
}
func initTestEnv(t *testing.T) *env {
t.Helper()
testingEnv := testing.Start(t, testing.Preload(images...))
e := &env{Env: testingEnv}
id := e.ID()
testID := t.Name() + " " + id
t.Logf("Using test id %s", testID)
e.testID = testID
e.runnerLabel = "test-" + id
e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
e.testRepo = testing.Getenv(t, "TEST_REPO")
e.testOrg = testing.Getenv(t, "TEST_ORG")
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO")
e.testJobs = createTestJobs(id, testResultCMNamePrefix, 2)
return e
}
func (e *env) f() {
}
func (e *env) buildAndLoadImages(t *testing.T) {
t.Helper()
e.DockerBuild(t, builds)
e.KindLoadImages(t, prebuildImages)
}
func (e *env) installCertManager(t *testing.T) {
t.Helper()
applyCfg := testing.KubectlConfig{NoValidate: true}
e.KubectlApply(t, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certManagerVersion), applyCfg)
waitCfg := testing.KubectlConfig{
Namespace: "cert-manager",
Timeout: 90 * time.Second,
}
e.KubectlWaitUntilDeployAvailable(t, "cert-manager-cainjector", waitCfg)
e.KubectlWaitUntilDeployAvailable(t, "cert-manager-webhook", waitCfg.WithTimeout(60*time.Second))
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
}
func (e *env) installActionsRunnerController(t *testing.T) {
t.Helper()
e.createControllerNamespaceAndServiceAccount(t)
scriptEnv := []string{
"KUBECONFIG=" + e.Kubeconfig(),
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
}
if e.useRunnerSet {
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
}
varEnv := []string{
"TEST_REPO=" + e.testRepo,
"TEST_ORG=" + e.testOrg,
"TEST_ORG_REPO=" + e.testOrgRepo,
"GITHUB_TOKEN=" + e.githubToken,
"RUNNER_LABEL=" + e.runnerLabel,
}
scriptEnv = append(scriptEnv, varEnv...)
scriptEnv = append(scriptEnv, commonScriptEnv...)
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
}
func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
t.Helper()
e.KubectlEnsureNS(t, "actions-runner-system", testing.KubectlConfig{})
e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
}
func (e *env) installActionsWorkflow(t *testing.T) {
t.Helper()
installActionsWorkflow(t, e.testID, e.runnerLabel, testResultCMNamePrefix, e.testRepo, e.testJobs)
}
func (e *env) verifyActionsWorkflowRun(t *testing.T) {
t.Helper()
verifyActionsWorkflowRun(t, e.Env, e.testJobs)
}
type job struct {
name, testArg, configMapName string
}
func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
var testJobs []job var testJobs []job
for i := 0; i < numJobs; i++ { for i := 0; i < numJobs; i++ {
@ -216,45 +284,52 @@ func TestE2E(t *testing.T) {
testJobs = append(testJobs, job{name: name, testArg: testArg, configMapName: configMapName}) testJobs = append(testJobs, job{name: name, testArg: testArg, configMapName: configMapName})
} }
t.Run("Install workflow", func(t *testing.T) { return testJobs
wfName := "E2E " + testID }
wf := testing.Workflow{
Name: wfName, func installActionsWorkflow(t *testing.T, testID, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) {
On: testing.On{ t.Helper()
Push: &testing.Push{
Branches: []string{"main"}, ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
wfName := "E2E " + testID
wf := testing.Workflow{
Name: wfName,
On: testing.On{
Push: &testing.Push{
Branches: []string{"main"},
},
},
Jobs: map[string]testing.Job{},
}
for _, j := range testJobs {
wf.Jobs[j.name] = testing.Job{
RunsOn: runnerLabel,
Steps: []testing.Step{
{
Uses: testing.ActionsCheckoutV2,
},
{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
}, },
}, },
Jobs: map[string]testing.Job{},
} }
}
for i := 0; i < numJobs; i++ { wfContent, err := yaml.Marshal(wf)
j := testJobs[i] if err != nil {
wf.Jobs[j.name] = testing.Job{ t.Fatal(err)
RunsOn: runnerLabel, }
Steps: []testing.Step{
{
Uses: testing.ActionsCheckoutV2,
},
{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
{
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
},
},
}
}
wfContent, err := yaml.Marshal(wf) script := []byte(fmt.Sprintf(`#!/usr/bin/env bash
if err != nil {
t.Fatal(err)
}
script := []byte(fmt.Sprintf(`#!/usr/bin/env bash
set -vx set -vx
name=$1 name=$1
id=$2 id=$2
@ -263,87 +338,70 @@ kubectl delete cm %s$id || true
kubectl create cm %s$id --from-literal=status=ok kubectl create cm %s$id --from-literal=status=ok
`, testResultCMNamePrefix, testResultCMNamePrefix)) `, testResultCMNamePrefix, testResultCMNamePrefix))
g := testing.GitRepo{ g := testing.GitRepo{
Dir: filepath.Join(t.TempDir(), "gitrepo"), Dir: filepath.Join(t.TempDir(), "gitrepo"),
Name: testRepo, Name: testRepo,
CommitMessage: wfName, CommitMessage: wfName,
Contents: map[string][]byte{ Contents: map[string][]byte{
".github/workflows/workflow.yaml": wfContent, ".github/workflows/workflow.yaml": wfContent,
"test.sh": script, "test.sh": script,
}, },
}
if err := g.Sync(ctx); err != nil {
t.Fatal(err)
}
})
if t.Failed() {
return
} }
t.Run("Verify workflow run result", func(t *testing.T) { if err := g.Sync(ctx); err != nil {
var expected []string t.Fatal(err)
}
for i := 0; i < numJobs; i++ {
expected = append(expected, "ok")
}
gomega.NewGomegaWithT(t).Eventually(func() ([]string, error) {
var results []string
var errs []error
for i := 0; i < numJobs; i++ {
testResultCMName := testJobs[i].configMapName
m, err := k.GetCMLiterals(ctx, testResultCMName, cmCfg)
if err != nil {
errs = append(errs, err)
} else {
result := m["status"]
results = append(results, result)
}
}
var err error
if len(errs) > 0 {
var msg string
for i, e := range errs {
msg += fmt.Sprintf("error%d: %v\n", i, e)
}
err = fmt.Errorf("%d errors occurred: %s", len(errs), msg)
}
return results, err
}, 60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
})
} }
func getenv(t *testing.T, name string) string { func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
t.Helper() t.Helper()
v := os.Getenv(name) var expected []string
if v == "" {
t.Fatal(name + " must be set") for _ = range testJobs {
expected = append(expected, "ok")
} }
return v
}
func init() { gomega.NewGomegaWithT(t).Eventually(func() ([]string, error) {
rand.Seed(time.Now().UnixNano()) var results []string
}
const letterBytes = "abcdefghijklmnopqrstuvwxyz" var errs []error
// Copied from https://stackoverflow.com/a/31832326 with thanks for i := range testJobs {
func RandStringBytesRmndr(n int) string { testResultCMName := testJobs[i].configMapName
b := make([]byte, n)
for i := range b { kubectlEnv := []string{
b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))] "KUBECONFIG=" + env.Kubeconfig(),
} }
return string(b)
cmCfg := testing.KubectlConfig{
Env: kubectlEnv,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
m, err := env.Kubectl.GetCMLiterals(ctx, testResultCMName, cmCfg)
if err != nil {
errs = append(errs, err)
} else {
result := m["status"]
results = append(results, result)
}
}
var err error
if len(errs) > 0 {
var msg string
for i, e := range errs {
msg += fmt.Sprintf("error%d: %v\n", i, e)
}
err = fmt.Errorf("%d errors occurred: %s", len(errs), msg)
}
return results, err
}, 60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
} }

42
testing/bash.go Normal file
View File

@ -0,0 +1,42 @@
package testing
import (
"context"
"os"
"os/exec"
"path/filepath"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// ScriptConfig holds the settings for a single script invocation:
// extra environment variables and the working directory.
type ScriptConfig struct {
	// Env is a list of KEY=VALUE pairs appended to the current process environment.
	Env []string
	// Dir is the working directory the script is run in.
	Dir string
}

// Bash runs shell scripts via the `bash` command.
// It embeds runtime.Cmdr for command execution with failure logging.
type Bash struct {
	runtime.Cmdr
}
// RunScript executes the bash script at path with the given config.
// The path is resolved to an absolute one before execution so that
// cfg.Dir can be set independently of where the script file lives.
func (k *Bash) RunScript(ctx context.Context, path string, cfg ScriptConfig) error {
	resolved, absErr := filepath.Abs(path)
	if absErr != nil {
		return absErr
	}
	cmd := k.bashRunScriptCmd(ctx, resolved, cfg)
	_, runErr := k.CombinedOutput(cmd)
	return runErr
}
// bashRunScriptCmd builds the exec.Cmd that runs `bash <path>`,
// inheriting the current process environment plus cfg.Env,
// with cfg.Dir as the working directory.
func (k *Bash) bashRunScriptCmd(ctx context.Context, path string, cfg ScriptConfig) *exec.Cmd {
	c := exec.CommandContext(ctx, "bash", path)
	c.Dir = cfg.Dir
	c.Env = append(os.Environ(), cfg.Env...)
	return c
}

49
testing/docker.go Normal file
View File

@ -0,0 +1,49 @@
package testing
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// Docker wraps the `docker` CLI for building container images.
// It embeds runtime.Cmdr for command execution with failure logging.
type Docker struct {
	runtime.Cmdr
}

// DockerBuild describes a single `docker build` invocation.
type DockerBuild struct {
	// Dockerfile is the path to the Dockerfile; its directory is used as the build context.
	Dockerfile string
	// Args are additional --build-arg NAME=VALUE pairs.
	Args []BuildArg
	// Image is the repo/tag the built image is tagged with.
	Image ContainerImage
}

// BuildArg is a single --build-arg NAME=VALUE pair.
type BuildArg struct {
	Name, Value string
}
// Build runs `docker build` once per entry in builds, stopping at the
// first failure. Every build is pinned to linux/amd64 via TARGETPLATFORM.
func (k *Docker) Build(ctx context.Context, builds []DockerBuild) error {
	for _, b := range builds {
		buildArgs := []string{"--build-arg=TARGETPLATFORM=" + "linux/amd64"}
		for _, a := range b.Args {
			buildArgs = append(buildArgs, "--build-arg="+a.Name+"="+a.Value)
		}
		cmd := k.dockerBuildCmd(ctx, b.Dockerfile, b.Image.Repo, b.Image.Tag, buildArgs)
		if _, err := k.CombinedOutput(cmd); err != nil {
			return fmt.Errorf("failed building %v: %w", b, err)
		}
	}
	return nil
}
// dockerBuildCmd builds the `docker build` command line for one image.
// The directory containing the Dockerfile is used as the build context.
func (k *Docker) dockerBuildCmd(ctx context.Context, dockerfile, repo, tag string, args []string) *exec.Cmd {
	fullArgs := []string{"build", "--tag", repo + ":" + tag, "-f", dockerfile, filepath.Dir(dockerfile)}
	fullArgs = append(fullArgs, args...)
	return exec.CommandContext(ctx, "docker", fullArgs...)
}

16
testing/getenv.go Normal file
View File

@ -0,0 +1,16 @@
package testing
import (
"os"
"testing"
)
// Getenv returns the value of the named environment variable,
// failing the test immediately when it is unset or empty.
func Getenv(t *testing.T, name string) string {
	t.Helper()
	value, ok := os.LookupEnv(name)
	if !ok || value == "" {
		t.Fatal(name + " must be set")
	}
	return value
}

View File

@ -7,7 +7,8 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
) )
type GitRepo struct { type GitRepo struct {
@ -15,6 +16,8 @@ type GitRepo struct {
Name string Name string
CommitMessage string CommitMessage string
Contents map[string][]byte Contents map[string][]byte
runtime.Cmdr
} }
func (g *GitRepo) Sync(ctx context.Context) error { func (g *GitRepo) Sync(ctx context.Context) error {
@ -34,7 +37,7 @@ func (g *GitRepo) Sync(ctx context.Context) error {
return fmt.Errorf("error getting abs path for %q: %w", g.Dir, err) return fmt.Errorf("error getting abs path for %q: %w", g.Dir, err)
} }
if _, err := g.combinedOutput(g.gitCloneCmd(ctx, repoURL, dir)); err != nil { if _, err := g.CombinedOutput(g.gitCloneCmd(ctx, repoURL, dir)); err != nil {
return err return err
} }
@ -45,17 +48,17 @@ func (g *GitRepo) Sync(ctx context.Context) error {
return fmt.Errorf("error writing %s: %w", path, err) return fmt.Errorf("error writing %s: %w", path, err)
} }
if _, err := g.combinedOutput(g.gitAddCmd(ctx, dir, path)); err != nil { if _, err := g.CombinedOutput(g.gitAddCmd(ctx, dir, path)); err != nil {
return err return err
} }
} }
if _, err := g.combinedOutput(g.gitDiffCmd(ctx, dir)); err != nil { if _, err := g.CombinedOutput(g.gitDiffCmd(ctx, dir)); err != nil {
if _, err := g.combinedOutput(g.gitCommitCmd(ctx, dir, g.CommitMessage)); err != nil { if _, err := g.CombinedOutput(g.gitCommitCmd(ctx, dir, g.CommitMessage)); err != nil {
return err return err
} }
if _, err := g.combinedOutput(g.gitPushCmd(ctx, dir)); err != nil { if _, err := g.CombinedOutput(g.gitPushCmd(ctx, dir)); err != nil {
return err return err
} }
} }
@ -90,23 +93,3 @@ func (g *GitRepo) gitPushCmd(ctx context.Context, dir string) *exec.Cmd {
cmd.Dir = dir cmd.Dir = dir
return cmd return cmd
} }
func (g *GitRepo) combinedOutput(cmd *exec.Cmd) (string, error) {
o, err := cmd.CombinedOutput()
if err != nil {
args := append([]string{}, cmd.Args...)
args[0] = cmd.Path
cs := strings.Join(args, " ")
s := string(o)
g.errorf("%s failed with output:\n%s", cs, s)
return s, err
}
return string(o), nil
}
func (g *GitRepo) errorf(f string, args ...interface{}) {
fmt.Fprintf(os.Stderr, f+"\n", args...)
}

125
testing/kubectl.go Normal file
View File

@ -0,0 +1,125 @@
package testing
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"time"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
)
// Kubectl wraps the `kubectl` CLI.
// It embeds runtime.Cmdr for command execution with failure logging.
type Kubectl struct {
	runtime.Cmdr
}
// KubectlConfig carries per-invocation options for kubectl commands.
type KubectlConfig struct {
	// Env is a list of KEY=VALUE pairs appended to the process environment.
	Env []string
	// NoValidate adds --validate=false when true.
	NoValidate bool
	// Timeout is passed as --timeout when positive.
	Timeout time.Duration
	// Namespace is passed as -n when non-empty.
	Namespace string
}

// WithTimeout returns a copy of the config with Timeout set to o;
// the receiver is left unmodified.
func (k KubectlConfig) WithTimeout(o time.Duration) KubectlConfig {
	updated := k
	updated.Timeout = o
	return updated
}
// EnsureNS makes sure the namespace exists: it first checks with
// `kubectl get ns` and only creates the namespace when the check fails.
func (k *Kubectl) EnsureNS(ctx context.Context, name string, cfg KubectlConfig) error {
	_, getErr := k.CombinedOutput(k.kubectlCmd(ctx, "get", []string{"ns", name}, cfg))
	if getErr == nil {
		return nil
	}
	_, createErr := k.CombinedOutput(k.kubectlCmd(ctx, "create", []string{"ns", name}, cfg))
	return createErr
}
// GetClusterRoleBinding returns the output of
// `kubectl get clusterrolebinding <name>`, or an error when the
// binding cannot be fetched (e.g. it does not exist).
func (k *Kubectl) GetClusterRoleBinding(ctx context.Context, name string, cfg KubectlConfig) (string, error) {
	cmd := k.kubectlCmd(ctx, "get", []string{"clusterrolebinding", name}, cfg)
	output, err := k.CombinedOutput(cmd)
	if err != nil {
		return "", err
	}
	return output, nil
}
// CreateClusterRoleBindingServiceAccount creates a clusterrolebinding that
// grants the given cluster role to the given service account
// (sa is in "namespace:name" form).
func (k *Kubectl) CreateClusterRoleBindingServiceAccount(ctx context.Context, name string, clusterrole string, sa string, cfg KubectlConfig) error {
	args := []string{"clusterrolebinding", name, "--clusterrole=" + clusterrole, "--serviceaccount=" + sa}
	_, err := k.CombinedOutput(k.kubectlCmd(ctx, "create", args, cfg))
	return err
}
// GetCMLiterals fetches the named ConfigMap as JSON and returns its .data
// entries. It returns an error when the ConfigMap cannot be fetched or the
// JSON cannot be parsed.
func (k *Kubectl) GetCMLiterals(ctx context.Context, name string, cfg KubectlConfig) (map[string]string, error) {
	out, err := k.CombinedOutput(k.kubectlCmd(ctx, "get", []string{"cm", name, "-o=json"}, cfg))
	if err != nil {
		return nil, err
	}

	// Only the .data field is of interest; everything else is discarded.
	var parsed struct {
		Data map[string]string `json:"data"`
	}
	if unmarshalErr := json.Unmarshal([]byte(out), &parsed); unmarshalErr != nil {
		k.Errorf("Failed unmarshalling this data to JSON:\n%s\n", out)
		return nil, fmt.Errorf("unmarshalling json: %w", unmarshalErr)
	}

	return parsed.Data, nil
}
// CreateCMLiterals creates a ConfigMap with the given name whose data
// entries are taken from the literals map.
// NOTE: map iteration order is random, so the order of --from-literal
// flags varies between calls (harmless to kubectl).
func (k *Kubectl) CreateCMLiterals(ctx context.Context, name string, literals map[string]string, cfg KubectlConfig) error {
	args := []string{"cm", name}
	for key, value := range literals {
		args = append(args, fmt.Sprintf("--from-literal=%s=%s", key, value))
	}
	_, err := k.CombinedOutput(k.kubectlCmd(ctx, "create", args, cfg))
	return err
}
// Apply runs `kubectl apply -f <path>`, where path may be a local file
// or a URL.
func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
	_, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg))
	return err
}
// WaitUntilDeployAvailable blocks until the named Deployment reports the
// Available condition, honoring cfg.Timeout when set.
func (k *Kubectl) WaitUntilDeployAvailable(ctx context.Context, name string, cfg KubectlConfig) error {
	_, err := k.CombinedOutput(k.kubectlCmd(ctx, "wait", []string{"deploy/" + name, "--for=condition=available"}, cfg))
	return err
}
// kubectlCmd assembles the kubectl command line for subcommand c with args,
// applying the options in cfg (--validate=false, -n, --timeout) and merging
// cfg.Env on top of the current process environment.
func (k *Kubectl) kubectlCmd(ctx context.Context, c string, args []string, cfg KubectlConfig) *exec.Cmd {
	args = append([]string{c}, args...)

	if cfg.NoValidate {
		args = append(args, "--validate=false")
	}

	if cfg.Namespace != "" {
		args = append(args, "-n="+cfg.Namespace)
	}

	if cfg.Timeout > 0 {
		// Duration.String yields the same "1m30s"-style value that
		// fmt.Sprintf("%s", cfg.Timeout) did, without the staticcheck
		// S1025 warning for a redundant Sprintf on a fmt.Stringer.
		args = append(args, "--timeout="+cfg.Timeout.String())
	}

	cmd := exec.CommandContext(ctx, "kubectl", args...)
	cmd.Env = os.Environ()
	cmd.Env = append(cmd.Env, cfg.Env...)

	return cmd
}

21
testing/random.go Normal file
View File

@ -0,0 +1,21 @@
package testing
import (
"math/rand"
"time"
)
// Seed the global math/rand source once at package load so that
// RandStringBytesRmndr produces different IDs across test runs.
// (rand.Seed is the pre-Go 1.20 way of doing this.)
func init() {
	rand.Seed(time.Now().UnixNano())
}
// letterBytes is the alphabet used for random IDs: lowercase ASCII letters
// only, so results are safe for use in Kubernetes resource names.
const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// RandStringBytesRmndr returns a random string of n lowercase letters.
// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(out)
}

View File

@ -0,0 +1,31 @@
package runtime
import (
"fmt"
"os"
"os/exec"
"strings"
)
// Cmdr provides shared helpers for running external commands and logging
// their failures. Embed it in a type to get CombinedOutput and Errorf.
type Cmdr struct {
}

// CombinedOutput runs cmd and returns its combined stdout+stderr as a string.
// On failure the full command line and the output are logged to stderr, and
// the (partial) output is still returned alongside the error.
func (k Cmdr) CombinedOutput(cmd *exec.Cmd) (string, error) {
	raw, err := cmd.CombinedOutput()
	output := string(raw)
	if err == nil {
		return output, nil
	}
	// Reconstruct the command line for the log message, substituting the
	// resolved binary path for args[0].
	argv := append([]string{}, cmd.Args...)
	argv[0] = cmd.Path
	k.Errorf("%s failed with output:\n%s", strings.Join(argv, " "), output)
	return output, err
}

// Errorf writes a printf-style message plus a trailing newline to stderr.
func (k Cmdr) Errorf(f string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, f+"\n", args...)
}

View File

@ -2,7 +2,6 @@ package testing
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
@ -10,16 +9,199 @@ import (
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/actions-runner-controller/actions-runner-controller/testing/runtime"
) )
type T = testing.T type T = testing.T
var Short = testing.Short var Short = testing.Short
// Cluster is a test cluster backend by a kind cluster and the dockerd powering it. func Img(repo, tag string) ContainerImage {
return ContainerImage{
Repo: repo,
Tag: tag,
}
}
// Env is a testing environment.
// All of its methods are idempotent so that you can safely call it from within each subtest
// and you can rerun the individual subtest until it works as you expect.
type Env struct {
kind *Kind
docker *Docker
Kubectl *Kubectl
bash *Bash
id string
}
func Start(t *testing.T, opts ...Option) *Env {
t.Helper()
k := StartKind(t, opts...)
var env Env
env.kind = k
d := &Docker{}
env.docker = d
kctl := &Kubectl{}
env.Kubectl = kctl
bash := &Bash{}
env.bash = bash
//
cmKey := "id"
kubectlEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
}
cmCfg := KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
m, _ := kctl.GetCMLiterals(ctx, testInfoName, cmCfg)
if m == nil {
id := RandStringBytesRmndr(10)
m = map[string]string{cmKey: id}
if err := kctl.CreateCMLiterals(ctx, testInfoName, m, cmCfg); err != nil {
t.Fatal(err)
}
}
env.id = m[cmKey]
return &env
}
func (e *Env) ID() string {
return e.id
}
func (e *Env) DockerBuild(t *testing.T, builds []DockerBuild) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := e.docker.Build(ctx, builds); err != nil {
t.Fatal(err)
}
}
func (e *Env) KindLoadImages(t *testing.T, prebuildImages []ContainerImage) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := e.kind.LoadImages(ctx, prebuildImages); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlApply(t *testing.T, path string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.Apply(ctx, path, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlWaitUntilDeployAvailable(t *testing.T, name string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.WaitUntilDeployAvailable(ctx, name, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlEnsureNS(t *testing.T, name string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if err := e.Kubectl.EnsureNS(ctx, name, cfg); err != nil {
t.Fatal(err)
}
}
func (e *Env) KubectlEnsureClusterRoleBindingServiceAccount(t *testing.T, bindingName string, clusterrole string, serviceaccount string, cfg KubectlConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
kubectlEnv := []string{
"KUBECONFIG=" + e.kind.Kubeconfig(),
}
cfg.Env = append(kubectlEnv, cfg.Env...)
if _, err := e.Kubectl.GetClusterRoleBinding(ctx, bindingName, cfg); err != nil {
if err := e.Kubectl.CreateClusterRoleBindingServiceAccount(ctx, bindingName, clusterrole, serviceaccount, cfg); err != nil {
t.Fatal(err)
}
}
}
func (e *Env) Kubeconfig() string {
return e.kind.Kubeconfig()
}
func (e *Env) RunScript(t *testing.T, path string, cfg ScriptConfig) {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
defer cancel()
if err := e.bash.RunScript(ctx, path, cfg); err != nil {
t.Fatal(err)
}
}
// Kind is a test cluster backed by a kind cluster and the dockerd powering it.
// It interacts with the kind cluster via the kind command and dockerd via the docker command // It interacts with the kind cluster via the kind command and dockerd via the docker command
// for various operations that otherwise needs to be automated via shell scripts or makefiles. // for various operations that otherwise needs to be automated via shell scripts or makefiles.
type Cluster struct { type Kind struct {
// Name is the name of the cluster // Name is the name of the cluster
Name string Name string
@ -29,6 +211,8 @@ type Cluster struct {
Dir string Dir string
kubeconfig string kubeconfig string
runtime.Cmdr
} }
type Config struct { type Config struct {
@ -50,7 +234,7 @@ type ContainerImage struct {
Repo, Tag string Repo, Tag string
} }
func Start(t *testing.T, k Cluster, opts ...Option) *Cluster { func StartKind(t *testing.T, opts ...Option) *Kind {
t.Helper() t.Helper()
invalidChars := []string{"/"} invalidChars := []string{"/"}
@ -60,7 +244,7 @@ func Start(t *testing.T, k Cluster, opts ...Option) *Cluster {
for _, c := range invalidChars { for _, c := range invalidChars {
name = strings.ReplaceAll(name, c, "") name = strings.ReplaceAll(name, c, "")
} }
var k Kind
k.Name = name k.Name = name
k.Dir = t.TempDir() k.Dir = t.TempDir()
@ -118,12 +302,12 @@ func Start(t *testing.T, k Cluster, opts ...Option) *Cluster {
return kk return kk
} }
func (k *Cluster) Kubeconfig() string { func (k *Kind) Kubeconfig() string {
return k.kubeconfig return k.kubeconfig
} }
func (k *Cluster) Start(ctx context.Context) error { func (k *Kind) Start(ctx context.Context) error {
getNodes, err := k.combinedOutput(k.kindGetNodesCmd(ctx, k.Name)) getNodes, err := k.CombinedOutput(k.kindGetNodesCmd(ctx, k.Name))
if err != nil { if err != nil {
return err return err
} }
@ -145,7 +329,7 @@ name: %s
return err return err
} }
if _, err := k.combinedOutput(k.kindCreateCmd(ctx, k.Name, f.Name())); err != nil { if _, err := k.CombinedOutput(k.kindCreateCmd(ctx, k.Name, f.Name())); err != nil {
return err return err
} }
} }
@ -153,70 +337,15 @@ name: %s
return nil return nil
} }
func (k *Cluster) combinedOutput(cmd *exec.Cmd) (string, error) { func (k *Kind) kindGetNodesCmd(ctx context.Context, cluster string) *exec.Cmd {
o, err := cmd.CombinedOutput()
if err != nil {
args := append([]string{}, cmd.Args...)
args[0] = cmd.Path
cs := strings.Join(args, " ")
s := string(o)
k.errorf("%s failed with output:\n%s", cs, s)
return s, err
}
return string(o), nil
}
func (k *Cluster) errorf(f string, args ...interface{}) {
fmt.Fprintf(os.Stderr, f+"\n", args...)
}
func (k *Cluster) kindGetNodesCmd(ctx context.Context, cluster string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "get", "nodes", "--name", cluster) return exec.CommandContext(ctx, "kind", "get", "nodes", "--name", cluster)
} }
func (k *Cluster) kindCreateCmd(ctx context.Context, cluster, configFile string) *exec.Cmd { func (k *Kind) kindCreateCmd(ctx context.Context, cluster, configFile string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "create", "cluster", "--name", cluster, "--config", configFile) return exec.CommandContext(ctx, "kind", "create", "cluster", "--name", cluster, "--config", configFile)
} }
type DockerBuild struct { func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
Dockerfile string
Args []BuildArg
Image ContainerImage
}
type BuildArg struct {
Name, Value string
}
func (k *Cluster) BuildImages(ctx context.Context, builds []DockerBuild) error {
for _, build := range builds {
var args []string
args = append(args, "--build-arg=TARGETPLATFORM="+"linux/amd64")
for _, buildArg := range build.Args {
args = append(args, "--build-arg="+buildArg.Name+"="+buildArg.Value)
}
_, err := k.combinedOutput(k.dockerBuildCmd(ctx, build.Dockerfile, build.Image.Repo, build.Image.Tag, args))
if err != nil {
return fmt.Errorf("failed building %v: %w", build, err)
}
}
return nil
}
func (k *Cluster) dockerBuildCmd(ctx context.Context, dockerfile, repo, tag string, args []string) *exec.Cmd {
buildContext := filepath.Dir(dockerfile)
args = append([]string{"build", "--tag", repo + ":" + tag, "-f", dockerfile, buildContext}, args...)
cmd := exec.CommandContext(ctx, "docker", args...)
return cmd
}
func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error {
for _, img := range images { for _, img := range images {
const maxRetries = 5 const maxRetries = 5
@ -236,7 +365,7 @@ func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error
}() }()
for i := 0; i <= maxRetries; i++ { for i := 0; i <= maxRetries; i++ {
out, err := k.combinedOutput(k.kindLoadDockerImageCmd(ctx, k.Name, img.Repo, img.Tag, tmpDir)) out, err := k.CombinedOutput(k.kindLoadDockerImageCmd(ctx, k.Name, img.Repo, img.Tag, tmpDir))
out = strings.TrimSpace(out) out = strings.TrimSpace(out)
@ -256,7 +385,7 @@ func (k *Cluster) LoadImages(ctx context.Context, images []ContainerImage) error
return nil return nil
} }
func (k *Cluster) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag, tmpDir string) *exec.Cmd { func (k *Kind) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag, tmpDir string) *exec.Cmd {
cmd := exec.CommandContext(ctx, "kind", "--loglevel=trace", "load", "docker-image", repo+":"+tag, "--name", cluster) cmd := exec.CommandContext(ctx, "kind", "--loglevel=trace", "load", "docker-image", repo+":"+tag, "--name", cluster)
cmd.Env = os.Environ() cmd.Env = os.Environ()
// Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap // Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
@ -271,9 +400,9 @@ func (k *Cluster) kindLoadDockerImageCmd(ctx context.Context, cluster, repo, tag
return cmd return cmd
} }
func (k *Cluster) PullImages(ctx context.Context, images []ContainerImage) error { func (k *Kind) PullImages(ctx context.Context, images []ContainerImage) error {
for _, img := range images { for _, img := range images {
_, err := k.combinedOutput(k.dockerPullCmd(ctx, img.Repo, img.Tag)) _, err := k.CombinedOutput(k.dockerPullCmd(ctx, img.Repo, img.Tag))
if err != nil { if err != nil {
return err return err
} }
@ -282,11 +411,11 @@ func (k *Cluster) PullImages(ctx context.Context, images []ContainerImage) error
return nil return nil
} }
func (k *Cluster) dockerPullCmd(ctx context.Context, repo, tag string) *exec.Cmd { func (k *Kind) dockerPullCmd(ctx context.Context, repo, tag string) *exec.Cmd {
return exec.CommandContext(ctx, "docker", "pull", repo+":"+tag) return exec.CommandContext(ctx, "docker", "pull", repo+":"+tag)
} }
func (k *Cluster) Stop(ctx context.Context) error { func (k *Kind) Stop(ctx context.Context) error {
if err := k.kindDeleteCmd(ctx, k.Name).Run(); err != nil { if err := k.kindDeleteCmd(ctx, k.Name).Run(); err != nil {
return err return err
} }
@ -294,11 +423,11 @@ func (k *Cluster) Stop(ctx context.Context) error {
return nil return nil
} }
func (k *Cluster) kindDeleteCmd(ctx context.Context, cluster string) *exec.Cmd { func (k *Kind) kindDeleteCmd(ctx context.Context, cluster string) *exec.Cmd {
return exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", cluster) return exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", cluster)
} }
func (k *Cluster) writeKubeconfig(ctx context.Context) error { func (k *Kind) writeKubeconfig(ctx context.Context) error {
var err error var err error
k.kubeconfig, err = filepath.Abs(filepath.Join(k.Dir, "kubeconfig")) k.kubeconfig, err = filepath.Abs(filepath.Join(k.Dir, "kubeconfig"))
@ -313,147 +442,10 @@ func (k *Cluster) writeKubeconfig(ctx context.Context) error {
return nil return nil
} }
func (k *Cluster) kindExportKubeconfigCmd(ctx context.Context, cluster, path string) *exec.Cmd { func (k *Kind) kindExportKubeconfigCmd(ctx context.Context, cluster, path string) *exec.Cmd {
cmd := exec.CommandContext(ctx, "kind", "export", "kubeconfig", "--name", cluster) cmd := exec.CommandContext(ctx, "kind", "export", "kubeconfig", "--name", cluster)
cmd.Env = os.Environ() cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "KUBECONFIG="+path) cmd.Env = append(cmd.Env, "KUBECONFIG="+path)
return cmd return cmd
} }
type KubectlConfig struct {
Env []string
NoValidate bool
Timeout time.Duration
Namespace string
}
func (k KubectlConfig) WithTimeout(o time.Duration) KubectlConfig {
k.Timeout = o
return k
}
func (k *Cluster) RunKubectlEnsureNS(ctx context.Context, name string, cfg KubectlConfig) error {
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"ns", name}, cfg)); err != nil {
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", []string{"ns", name}, cfg)); err != nil {
return err
}
}
return nil
}
func (k *Cluster) GetClusterRoleBinding(ctx context.Context, name string, cfg KubectlConfig) (string, error) {
o, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"clusterrolebinding", name}, cfg))
if err != nil {
return "", err
}
return o, nil
}
func (k *Cluster) CreateClusterRoleBindingServiceAccount(ctx context.Context, name string, clusterrole string, sa string, cfg KubectlConfig) error {
_, err := k.combinedOutput(k.kubectlCmd(ctx, "create", []string{"clusterrolebinding", name, "--clusterrole=" + clusterrole, "--serviceaccount=" + sa}, cfg))
if err != nil {
return err
}
return nil
}
func (k *Cluster) GetCMLiterals(ctx context.Context, name string, cfg KubectlConfig) (map[string]string, error) {
o, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"cm", name, "-o=json"}, cfg))
if err != nil {
return nil, err
}
var cm struct {
Data map[string]string `json:"data"`
}
if err := json.Unmarshal([]byte(o), &cm); err != nil {
k.errorf("Failed unmarshalling this data to JSON:\n%s\n", o)
return nil, fmt.Errorf("unmarshalling json: %w", err)
}
return cm.Data, nil
}
func (k *Cluster) CreateCMLiterals(ctx context.Context, name string, literals map[string]string, cfg KubectlConfig) error {
args := []string{"cm", name}
for k, v := range literals {
args = append(args, fmt.Sprintf("--from-literal=%s=%s", k, v))
}
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", args, cfg)); err != nil {
return err
}
return nil
}
func (k *Cluster) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
return err
}
return nil
}
func (k *Cluster) WaitUntilDeployAvailable(ctx context.Context, name string, cfg KubectlConfig) error {
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "wait", []string{"deploy/" + name, "--for=condition=available"}, cfg)); err != nil {
return err
}
return nil
}
func (k *Cluster) kubectlCmd(ctx context.Context, c string, args []string, cfg KubectlConfig) *exec.Cmd {
args = append([]string{c}, args...)
if cfg.NoValidate {
args = append(args, "--validate=false")
}
if cfg.Namespace != "" {
args = append(args, "-n="+cfg.Namespace)
}
if cfg.Timeout > 0 {
args = append(args, "--timeout="+fmt.Sprintf("%s", cfg.Timeout))
}
cmd := exec.CommandContext(ctx, "kubectl", args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, cfg.Env...)
return cmd
}
type ScriptConfig struct {
Env []string
Dir string
}
func (k *Cluster) RunScript(ctx context.Context, path string, cfg ScriptConfig) error {
abs, err := filepath.Abs(path)
if err != nil {
return err
}
if _, err := k.combinedOutput(k.bashRunScriptCmd(ctx, abs, cfg)); err != nil {
return err
}
return nil
}
func (k *Cluster) bashRunScriptCmd(ctx context.Context, path string, cfg ScriptConfig) *exec.Cmd {
cmd := exec.CommandContext(ctx, "bash", path)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, cfg.Env...)
cmd.Dir = cfg.Dir
return cmd
}