e2e: Install and run workflow and verify the result (#661)

This enhances the E2E test suite introduced in #658 to also include the following steps:

- Install GitHub Actions workflow
- Trigger a workflow run via a git commit
- Verify the workflow run result

In the workflow, we use `kubectl create cm --from-literal` to create a configmap that contains a unique test ID. In the last step we obtain the configmap from within the E2E test and check that the test ID matches the expected one.

To install a GitHub Actions workflow, we clone a GitHub repository denoted by the TEST_REPO envvar, programmatically generate a few files with some Go code, run `git-add`, `git-commit`, and then `git-push` to actually push the files to the repository. A single commit containing an updated workflow definition and an updated file seems to run a workflow derived from the definition introduced in the commit, which was a bit surprising but useful behaviour.

At this point, the E2E test fully covers all the steps for a GitHub token based installation. We need to add scenarios for more deployment options, like GitHub App, RunnerDeployment, HRA, and so on. But each of them would be worth another pull request.
This commit is contained in:
Yusuke Kuoka 2021-06-28 08:30:32 +09:00 committed by GitHub
parent 927d6f03ce
commit 7a305d2892
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 427 additions and 54 deletions

3
.gitignore vendored
View File

@ -19,6 +19,7 @@ bin
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.vscode
.idea
*.swp
*.swo
@ -31,3 +32,5 @@ bin
# OS
.DS_STORE
/test-assets

View File

@ -211,6 +211,11 @@ acceptance/deploy:
acceptance/tests:
acceptance/checks.sh
.PHONY: e2e
e2e:
go clean -testcache
go test -v -timeout 600s -run '^TestE2E$$' ./test/e2e
# Upload release file to GitHub.
github-release: release
ghr ${VERSION} release/

View File

@ -47,6 +47,8 @@ fi
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
if [ -n "${TEST_REPO}" ]; then
if [ -n "USE_RUNNERSET" ]; then
cat acceptance/testdata/repo.runnerset.yaml | envsubst | kubectl apply -f -

View File

@ -38,7 +38,8 @@ spec:
# labels:
# - "mylabel 1"
# - "mylabel 2"
labels:
- "${RUNNER_LABEL}"
#
# Non-standard working directory
#

1
go.mod
View File

@ -36,4 +36,5 @@ require (
sigs.k8s.io/controller-runtime v0.9.0
sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca // indirect
sigs.k8s.io/testing_frameworks v0.1.2 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

View File

@ -3,47 +3,37 @@ package e2e
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"time"
"github.com/actions-runner-controller/actions-runner-controller/testing"
"github.com/onsi/gomega"
"sigs.k8s.io/yaml"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
// almost certainly you'd want to make the default go test timeout from 30s to longer and enough value.
// Press Cmd + Shift + P, type "Workspace Settings" and open it, and type "go test timeout" and set e.g. 600s there.
// See https://github.com/golang/vscode-go/blob/master/docs/settings.md#gotesttimeout for more information.
//
// This test uses testing.Logf extensively for debugging purposes.
// But messages logged via Logf show up only when the test failed by default.
// To always enable logging, do not forget to pass `-test.v` to `go test`.
// If you're using VS Code, open `Workspace Settings` and search for `go test flags`, edit the `settings.json` and put the below:
// "go.testFlags": ["-v"]
func TestE2E(t *testing.T) {
if testing.Short() {
t.Skip("Skipped as -short is set")
}
Img := func(repo, tag string) testing.ContainerImage {
var (
Img = func(repo, tag string) testing.ContainerImage {
return testing.ContainerImage{
Repo: repo,
Tag: tag,
}
}
controllerImageRepo := "actionsrunnercontrollere2e/actions-runner-controller"
controllerImageTag := "e2e"
controllerImage := Img(controllerImageRepo, controllerImageTag)
runnerImageRepo := "actionsrunnercontrollere2e/actions-runner"
runnerImageTag := "e2e"
runnerImage := Img(runnerImageRepo, runnerImageTag)
controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
controllerImageTag = "e2e"
controllerImage = Img(controllerImageRepo, controllerImageTag)
runnerImageRepo = "actionsrunnercontrollere2e/actions-runner"
runnerImageTag = "e2e"
runnerImage = Img(runnerImageRepo, runnerImageTag)
prebuildImages := []testing.ContainerImage{
prebuildImages = []testing.ContainerImage{
controllerImage,
runnerImage,
}
builds := []testing.DockerBuild{
builds = []testing.DockerBuild{
{
Dockerfile: "../../Dockerfile",
Args: []testing.BuildArg{},
@ -56,15 +46,35 @@ func TestE2E(t *testing.T) {
},
}
certManagerVersion := "v1.1.1"
certManagerVersion = "v1.1.1"
images := []testing.ContainerImage{
images = []testing.ContainerImage{
Img("docker", "dind"),
Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
}
)
// If you're willing to run this test via VS Code "run test" or "debug test",
// almost certainly you'd want to make the default go test timeout from 30s to longer and enough value.
// Press Cmd + Shift + P, type "Workspace Settings" and open it, and type "go test timeout" and set e.g. 600s there.
// See https://github.com/golang/vscode-go/blob/master/docs/settings.md#gotesttimeout for more information.
//
// This test uses testing.Logf extensively for debugging purposes.
// But messages logged via Logf show up only when the test failed by default.
// To always enable logging, do not forget to pass `-test.v` to `go test`.
// If you're using VS Code, open `Workspace Settings` and search for `go test flags`, edit the `settings.json` and put the below:
// "go.testFlags": ["-v"]
//
// This function requires a few environment variables to be set to provide some test data.
// If you're using VS Code and wanting to run this test locally,
// Browse "Workspace Settings" and search for "go test env file" and put e.g. "${workspaceFolder}/.test.env" there.
func TestE2E(t *testing.T) {
if testing.Short() {
t.Skip("Skipped as -short is set")
}
k := testing.Start(t, testing.Cluster{}, testing.Preload(images...))
@ -88,27 +98,27 @@ func TestE2E(t *testing.T) {
}
t.Run("install cert-manager", func(t *testing.T) {
certmanagerVersion := "v1.1.1"
applyCfg := testing.KubectlConfig{NoValidate: true, Env: kubectlEnv}
if err := k.Apply(ctx, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certmanagerVersion), testing.KubectlConfig{NoValidate: true}); err != nil {
if err := k.Apply(ctx, fmt.Sprintf("https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml", certManagerVersion), applyCfg); err != nil {
t.Fatal(err)
}
certmanagerKubectlCfg := testing.KubectlConfig{
waitCfg := testing.KubectlConfig{
Env: kubectlEnv,
Namespace: "cert-manager",
Timeout: 90 * time.Second,
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-cainjector", certmanagerKubectlCfg); err != nil {
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-cainjector", waitCfg); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-webhook", certmanagerKubectlCfg.WithTimeout(60*time.Second)); err != nil {
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager-webhook", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager", certmanagerKubectlCfg.WithTimeout(60*time.Second)); err != nil {
if err := k.WaitUntilDeployAvailable(ctx, "cert-manager", waitCfg.WithTimeout(60*time.Second)); err != nil {
t.Fatal(err)
}
@ -117,32 +127,175 @@ func TestE2E(t *testing.T) {
}
})
// If you're using VS Code and wanting to run this test locally,
// Browse "Workspace Settings" and search for "go test env file" and put e.g. "${workspaceFolder}/.test.env" there
githubToken := os.Getenv("GITHUB_TOKEN")
if githubToken == "" {
t.Fatal("GITHUB_TOKEN must be set")
t.Run("make default serviceaccount cluster-admin", func(t *testing.T) {
cfg := testing.KubectlConfig{Env: kubectlEnv}
bindingName := "default-admin"
if _, err := k.GetClusterRoleBinding(ctx, bindingName, cfg); err != nil {
if err := k.CreateClusterRoleBindingServiceAccount(ctx, bindingName, "cluster-admin", "default:default", cfg); err != nil {
t.Fatal(err)
}
}
})
cmCfg := testing.KubectlConfig{
Env: kubectlEnv,
}
testInfoName := "test-info"
m, _ := k.GetCMLiterals(ctx, testInfoName, cmCfg)
t.Run("Save test ID", func(t *testing.T) {
if m == nil {
id := RandStringBytesRmndr(10)
m = map[string]string{"id": id}
if err := k.CreateCMLiterals(ctx, testInfoName, m, cmCfg); err != nil {
t.Fatal(err)
}
}
})
id := m["id"]
runnerLabel := "test-" + id
testID := t.Name() + " " + id
t.Logf("Using test id %s", testID)
githubToken := getenv(t, "GITHUB_TOKEN")
testRepo := getenv(t, "TEST_REPO")
testOrg := getenv(t, "TEST_ORG")
testOrgRepo := getenv(t, "TEST_ORG_REPO")
if t.Failed() {
return
}
scriptEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
"TEST_REPO=" + "actions-runner-controller/mumoshu-actions-test",
"TEST_ORG=" + "actions-runner-controller",
"TEST_ORG_REPO=" + "actions-runner-controller/mumoshu-actions-test-org-runners",
"SYNC_PERIOD=" + "10s",
"USE_RUNNERSET=" + "1",
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
"GITHUB_TOKEN=" + githubToken,
}
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
scriptEnv := []string{
"KUBECONFIG=" + k.Kubeconfig(),
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
"ACCEPTANCE_TEST_SECRET_TYPE=token",
"NAME=" + controllerImageRepo,
"VERSION=" + controllerImageTag,
"RUNNER_NAME=" + runnerImageRepo,
"RUNNER_TAG=" + runnerImageTag,
"TEST_REPO=" + testRepo,
"TEST_ORG=" + testOrg,
"TEST_ORG_REPO=" + testOrgRepo,
"SYNC_PERIOD=" + "10s",
"USE_RUNNERSET=" + "1",
"GITHUB_TOKEN=" + githubToken,
"RUNNER_LABEL=" + runnerLabel,
}
t.Run("install actions-runner-controller", func(t *testing.T) {
if err := k.RunScript(ctx, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv}); err != nil {
t.Fatal(err)
}
})
testResultCMName := fmt.Sprintf("test-result-%s", id)
if t.Failed() {
return
}
t.Run("Install workflow", func(t *testing.T) {
wfName := "E2E " + testID
wf := testing.Workflow{
Name: wfName,
On: testing.On{
Push: &testing.Push{
Branches: []string{"main"},
},
},
Jobs: map[string]testing.Job{
"test": {
RunsOn: runnerLabel,
Steps: []testing.Step{
{
Uses: testing.ActionsCheckoutV2,
},
{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.20.2",
},
},
{
Run: "./test.sh",
},
},
},
},
}
wfContent, err := yaml.Marshal(wf)
if err != nil {
t.Fatal(err)
}
script := []byte(fmt.Sprintf(`#!/usr/bin/env bash
set -vx
echo hello from %s
kubectl delete cm %s || true
kubectl create cm %s --from-literal=status=ok
`, testID, testResultCMName, testResultCMName))
g := testing.GitRepo{
Dir: filepath.Join(t.TempDir(), "gitrepo"),
Name: testRepo,
CommitMessage: wfName,
Contents: map[string][]byte{
".github/workflows/workflow.yaml": wfContent,
"test.sh": script,
},
}
if err := g.Sync(ctx); err != nil {
t.Fatal(err)
}
})
if t.Failed() {
return
}
t.Run("Verify workflow run result", func(t *testing.T) {
gomega.NewGomegaWithT(t).Eventually(func() (string, error) {
m, err := k.GetCMLiterals(ctx, testResultCMName, cmCfg)
if err != nil {
return "", err
}
result := m["status"]
return result, nil
}, 60*time.Second, 10*time.Second).Should(gomega.Equal("ok"))
})
}
// getenv returns the value of the named environment variable, failing the
// calling test immediately when it is unset or empty.
func getenv(t *testing.T, name string) string {
	t.Helper()

	value := os.Getenv(name)
	if value == "" {
		t.Fatal(name + " must be set")
	}

	return value
}
// Seed the process-global math/rand source once at startup so that
// RandStringBytesRmndr yields a different test ID sequence per run.
func init() {
	seed := time.Now().UnixNano()
	rand.Seed(seed)
}
// letterBytes is the alphabet random test IDs are drawn from.
const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// RandStringBytesRmndr returns a random string of length n drawn from
// letterBytes, using the package-global math/rand source.
//
// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
	out := make([]byte, n)
	for i := 0; i < len(out); i++ {
		out[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(out)
}

112
testing/git.go Normal file
View File

@ -0,0 +1,112 @@
package testing
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// GitRepo manages a local clone of a remote GitHub repository and pushes
// generated file contents to it as a single commit.
type GitRepo struct {
	// Dir is the local directory the repository is cloned into.
	Dir string
	// Name is the GitHub repository in "owner/name" form.
	Name string
	// CommitMessage is the message for the commit created by Sync.
	CommitMessage string
	// Contents maps repository-relative paths to the file contents to write.
	Contents map[string][]byte
}

// Sync clones the repository over SSH, writes Contents, stages them, and —
// only when the staged files actually differ from HEAD — commits and pushes
// to origin/main.
func (g *GitRepo) Sync(ctx context.Context) error {
	repoName := g.Name
	if repoName == "" {
		return errors.New("missing git repo name")
	}

	repoURL := fmt.Sprintf("git@github.com:%s.git", repoName)

	if g.Dir == "" {
		return errors.New("missing git dir")
	}

	dir, err := filepath.Abs(g.Dir)
	if err != nil {
		return fmt.Errorf("error getting abs path for %q: %w", g.Dir, err)
	}

	if _, err := g.combinedOutput(g.gitCloneCmd(ctx, repoURL, dir)); err != nil {
		return err
	}

	for path, content := range g.Contents {
		absPath := filepath.Join(dir, path)

		// Create parent directories so that nested paths like
		// ".github/workflows/workflow.yaml" can be written even when the
		// cloned repository does not contain them yet (os.WriteFile does
		// not create intermediate directories).
		if err := os.MkdirAll(filepath.Dir(absPath), 0755); err != nil {
			return fmt.Errorf("error creating dir for %s: %w", path, err)
		}

		// 0755 keeps generated scripts (e.g. test.sh) executable.
		if err := os.WriteFile(absPath, content, 0755); err != nil {
			return fmt.Errorf("error writing %s: %w", path, err)
		}

		if _, err := g.combinedOutput(g.gitAddCmd(ctx, dir, path)); err != nil {
			return err
		}
	}

	// `git diff --exit-code --cached` exits non-zero exactly when staged
	// changes exist, so we commit and push only in that case.
	if _, err := g.combinedOutput(g.gitDiffCmd(ctx, dir)); err != nil {
		if _, err := g.combinedOutput(g.gitCommitCmd(ctx, dir, g.CommitMessage)); err != nil {
			return err
		}

		if _, err := g.combinedOutput(g.gitPushCmd(ctx, dir)); err != nil {
			return err
		}
	}

	return nil
}

// gitCloneCmd builds a `git clone` command for repo into dir.
func (g *GitRepo) gitCloneCmd(ctx context.Context, repo, dir string) *exec.Cmd {
	return exec.CommandContext(ctx, "git", "clone", repo, dir)
}

// gitDiffCmd builds a `git diff --exit-code --cached` command, used to
// detect whether any staged changes exist.
func (g *GitRepo) gitDiffCmd(ctx context.Context, dir string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, "git", "diff", "--exit-code", "--cached")
	cmd.Dir = dir
	return cmd
}

// gitAddCmd builds a `git add` command for the given path.
func (g *GitRepo) gitAddCmd(ctx context.Context, dir, path string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, "git", "add", path)
	cmd.Dir = dir
	return cmd
}

// gitCommitCmd builds a `git commit -m` command with the given message.
func (g *GitRepo) gitCommitCmd(ctx context.Context, dir, msg string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, "git", "commit", "-m", msg)
	cmd.Dir = dir
	return cmd
}

// gitPushCmd builds a `git push origin main` command.
func (g *GitRepo) gitPushCmd(ctx context.Context, dir string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, "git", "push", "origin", "main")
	cmd.Dir = dir
	return cmd
}

// combinedOutput runs cmd, returning its combined stdout+stderr. On failure
// the full command line and output are logged to stderr, and the output is
// still returned alongside the error for the caller's benefit.
func (g *GitRepo) combinedOutput(cmd *exec.Cmd) (string, error) {
	o, err := cmd.CombinedOutput()
	if err != nil {
		args := append([]string{}, cmd.Args...)
		// Show the resolved binary path rather than the bare "git" argv[0].
		args[0] = cmd.Path
		cs := strings.Join(args, " ")
		s := string(o)
		g.errorf("%s failed with output:\n%s", cs, s)

		return s, err
	}

	return string(o), nil
}

// errorf writes a diagnostic line to stderr.
func (g *GitRepo) errorf(f string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, f+"\n", args...)
}

View File

@ -2,6 +2,7 @@ package testing
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
@ -342,6 +343,55 @@ func (k *Cluster) RunKubectlEnsureNS(ctx context.Context, name string, cfg Kubec
return nil
}
// GetClusterRoleBinding returns the raw `kubectl get clusterrolebinding`
// output for the named binding, or an error when it does not exist.
func (k *Cluster) GetClusterRoleBinding(ctx context.Context, name string, cfg KubectlConfig) (string, error) {
	args := []string{"clusterrolebinding", name}

	out, err := k.combinedOutput(k.kubectlCmd(ctx, "get", args, cfg))
	if err != nil {
		return "", err
	}

	return out, nil
}
// CreateClusterRoleBindingServiceAccount runs `kubectl create clusterrolebinding`,
// granting the given cluster role to the "namespace:name" service account sa.
func (k *Cluster) CreateClusterRoleBindingServiceAccount(ctx context.Context, name string, clusterrole string, sa string, cfg KubectlConfig) error {
	args := []string{
		"clusterrolebinding",
		name,
		"--clusterrole=" + clusterrole,
		"--serviceaccount=" + sa,
	}

	if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", args, cfg)); err != nil {
		return err
	}

	return nil
}
// GetCMLiterals fetches the named configmap via `kubectl get cm -o=json` and
// returns its data entries. The raw output is logged when it cannot be parsed.
func (k *Cluster) GetCMLiterals(ctx context.Context, name string, cfg KubectlConfig) (map[string]string, error) {
	out, err := k.combinedOutput(k.kubectlCmd(ctx, "get", []string{"cm", name, "-o=json"}, cfg))
	if err != nil {
		return nil, err
	}

	var parsed struct {
		Data map[string]string `json:"data"`
	}

	if err := json.Unmarshal([]byte(out), &parsed); err != nil {
		k.errorf("Failed unmarshalling this data to JSON:\n%s\n", out)

		return nil, fmt.Errorf("unmarshalling json: %w", err)
	}

	return parsed.Data, nil
}
// CreateCMLiterals runs `kubectl create cm` with one --from-literal flag per
// entry of literals.
//
// Note: map iteration order is random, so the flag order varies between
// calls; kubectl does not care.
func (k *Cluster) CreateCMLiterals(ctx context.Context, name string, literals map[string]string, cfg KubectlConfig) error {
	args := []string{"cm", name}

	// The loop variables are deliberately not named `k`/`v`: `k` would
	// shadow the method receiver inside the loop body.
	for key, value := range literals {
		args = append(args, fmt.Sprintf("--from-literal=%s=%s", key, value))
	}

	if _, err := k.combinedOutput(k.kubectlCmd(ctx, "create", args, cfg)); err != nil {
		return err
	}

	return nil
}
func (k *Cluster) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
if _, err := k.combinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
return err

46
testing/workflow.go Normal file
View File

@ -0,0 +1,46 @@
package testing
const (
	// ActionsCheckoutV2 is the checkout action referenced by generated
	// workflow definitions.
	ActionsCheckoutV2 = "actions/checkout@v2"
)

// Workflow models a GitHub Actions workflow file. It is marshalled to YAML
// via the JSON tags (sigs.k8s.io/yaml), so field order here determines the
// order of keys in the generated file.
type Workflow struct {
	Name string         `json:"name"`
	On   On             `json:"on"`
	Jobs map[string]Job `json:"jobs"`
}

// On describes the workflow's trigger configuration.
type On struct {
	Push             *Push             `json:"push,omitempty"`
	WorkflowDispatch *WorkflowDispatch `json:"workflow_dispatch,omitempty"`
}

// Push configures a push trigger, optionally filtered by branch names.
type Push struct {
	Branches []string `json:"branches,omitempty"`
}

// WorkflowDispatch configures a manual trigger and its input parameters.
type WorkflowDispatch struct {
	Inputs map[string]InputSpec `json:"inputs,omitempty"`
}

// InputSpec describes a single workflow_dispatch input parameter.
type InputSpec struct {
	Description string `json:"description,omitempty"`
	Required    bool   `json:"required,omitempty"`
	Default     string `json:"default,omitempty"`
}

// Job is a single workflow job: the runner label it targets and its steps.
type Job struct {
	RunsOn string `json:"runs-on"`
	Steps  []Step `json:"steps"`
}

// Step is one step of a job; exactly one of Uses or Run is normally set.
type Step struct {
	Name string `json:"name,omitempty"`
	Uses string `json:"uses,omitempty"`
	With *With  `json:"with,omitempty"`
	Run  string `json:"run,omitempty"`
}

// With holds the `with:` arguments passed to an action step.
type With struct {
	Version string `json:"version,omitempty"`
}