Avastancu/arc e2e test linux vm (#2285)
This commit is contained in:
parent
149cf47c83
commit
910269aa11
|
|
@ -0,0 +1,36 @@
|
||||||
|
name: 'E2E ARC Test Action'
description: 'Includes common arc installation, setup and test file run'

inputs:
  github-token:
    description: 'JWT generated with Github App inputs'
    required: true

runs:
  using: "composite"
  steps:
    - name: Install ARC
      run: helm install arc --namespace "arc-systems" --create-namespace ./charts/actions-runner-controller-2
      shell: bash
    - name: Get datetime
      # We are using this value further in the runner installation to avoid runner name collision that are a risk with hard coded values.
      # A datetime including the 3 nanoseconds are a good option for this and also adds to readability and runner sorting if needed.
      run: echo "DATE_TIME=$(date +'%Y-%m-%d-%H-%M-%S-%3N')" >> $GITHUB_ENV
      shell: bash
    - name: Install runners
      run: |
        helm install "arc-runner-${{ env.DATE_TIME }}" \
            --namespace "arc-runners" \
            --create-namespace \
            --set githubConfigUrl="https://github.com/actions/actions-runner-controller" \
            --set githubConfigSecret.github_token="${{ inputs.github-token }}" \
            ./charts/auto-scaling-runner-set \
            --debug
        kubectl get pods -A
      shell: bash
    - name: Test ARC scales pods up and down
      run: |
        export GITHUB_TOKEN="${{ inputs.github-token }}"
        export DATE_TIME="${{ env.DATE_TIME }}"
        go test ./test_e2e_arc -v
      shell: bash
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
name: ARC-REUSABLE-WORKFLOW
|
name: ARC Reusable Workflow
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
|
|
|
||||||
|
|
name: CI ARC E2E Linux VM Test

on:
  workflow_dispatch:

jobs:
  setup-steps:
    runs-on: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v3
      - name: Create Kind cluster
        run: |
          PATH=$(go env GOPATH)/bin:$PATH
          kind create cluster --name e2e-test
      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
      - uses: ./.github/actions/e2e-arc-test
        with:
          github-token: ${{ steps.get_workflow_token.outputs.token }}
2
Makefile
2
Makefile
|
|
@ -73,7 +73,7 @@ GO_TEST_ARGS ?= -short
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
test: generate fmt vet manifests shellcheck
|
test: generate fmt vet manifests shellcheck
|
||||||
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
|
go test $(GO_TEST_ARGS) `go list ./... | grep -v ./test_e2e_arc` -coverprofile cover.out
|
||||||
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
|
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers/actions.summerwind.net
|
||||||
|
|
||||||
test-with-deps: kube-apiserver etcd kubectl
|
test-with-deps: kube-apiserver etcd kubectl
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,132 @@
|
||||||
|
package e2e_arc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// podCountsByType holds the number of ARC pods observed in the cluster,
// grouped by their role (controller, listener, runner).
type podCountsByType struct {
	controllers int
	listeners   int
	runners     int
}
|
||||||
|
|
||||||
|
func getPodsByType(clientset *kubernetes.Clientset) podCountsByType {
|
||||||
|
arc_namespace := "arc-systems"
|
||||||
|
availableArcPods, err := clientset.CoreV1().Pods(arc_namespace).List(context.TODO(), metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
panic(err.Error())
|
||||||
|
}
|
||||||
|
runners_namespace := "arc-runners"
|
||||||
|
availableRunnerPods, err := clientset.CoreV1().Pods(runners_namespace).List(context.TODO(), metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
panic(err.Error())
|
||||||
|
}
|
||||||
|
podsByType := podCountsByType{}
|
||||||
|
for _, pod := range availableArcPods.Items {
|
||||||
|
if strings.Contains(pod.Name, "controller") {
|
||||||
|
podsByType.controllers += 1
|
||||||
|
}
|
||||||
|
if strings.Contains(pod.Name, "listener") {
|
||||||
|
podsByType.listeners += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, pod := range availableRunnerPods.Items {
|
||||||
|
if strings.Contains(pod.Name, "runner") {
|
||||||
|
podsByType.runners += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return podsByType
|
||||||
|
}
|
||||||
|
|
||||||
|
func pollForClusterState(clientset *kubernetes.Clientset, expectedPodsCount podCountsByType, maxTime int) bool {
|
||||||
|
sleepTime := 5
|
||||||
|
maxRetries := maxTime / sleepTime
|
||||||
|
success := false
|
||||||
|
for i := 0; i <= maxRetries; i++ {
|
||||||
|
time.Sleep(time.Second * time.Duration(sleepTime))
|
||||||
|
availablePodsCount := getPodsByType(clientset)
|
||||||
|
if availablePodsCount == expectedPodsCount {
|
||||||
|
success = true
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%v", availablePodsCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return success
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARCJobs(t *testing.T) {
|
||||||
|
configFile := filepath.Join(
|
||||||
|
os.Getenv("HOME"), ".kube", "config",
|
||||||
|
)
|
||||||
|
|
||||||
|
config, err := clientcmd.BuildConfigFromFlags("", configFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
clientset, err := kubernetes.NewForConfig(config)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Get available pods before job run", func(t *testing.T) {
|
||||||
|
expectedPodsCount := podCountsByType{1, 1, 0}
|
||||||
|
success := pollForClusterState(clientset, expectedPodsCount, 60)
|
||||||
|
if !success {
|
||||||
|
t.Fatal("Expected pods count did not match available pods count before job run.")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
t.Run("Get available pods during job run", func(t *testing.T) {
|
||||||
|
c := http.Client{}
|
||||||
|
dateTime := os.Getenv("DATE_TIME")
|
||||||
|
// We are triggering manually a workflow that already exists in the repo.
|
||||||
|
// This workflow is expected to spin up a number of runner pods matching the runners value set in podCountsByType.
|
||||||
|
url := "https://api.github.com/repos/actions/actions-runner-controller/actions/workflows/e2e-test-dispatch-workflow.yaml/dispatches"
|
||||||
|
jsonStr := []byte(fmt.Sprintf(`{"ref":"master", "inputs":{"date_time":"%s"}}`, dateTime))
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
ght := os.Getenv("GITHUB_TOKEN")
|
||||||
|
req.Header.Add("Accept", "application/vnd.github+json")
|
||||||
|
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ght))
|
||||||
|
req.Header.Add("X-GitHub-Api-Version", "2022-11-28")
|
||||||
|
|
||||||
|
resp, err := c.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
expectedPodsCount := podCountsByType{1, 1, 3}
|
||||||
|
success := pollForClusterState(clientset, expectedPodsCount, 120)
|
||||||
|
if !success {
|
||||||
|
t.Fatal("Expected pods count did not match available pods count during job run.")
|
||||||
|
}
|
||||||
|
|
||||||
|
},
|
||||||
|
)
|
||||||
|
t.Run("Get available pods after job run", func(t *testing.T) {
|
||||||
|
expectedPodsCount := podCountsByType{1, 1, 0}
|
||||||
|
success := pollForClusterState(clientset, expectedPodsCount, 120)
|
||||||
|
if !success {
|
||||||
|
t.Fatal("Expected pods count did not match available pods count after job run.")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
Loading…
Reference in New Issue