Merge branch 'master' into dependabot/go_modules/gomod-c800bf9c5b

Nikola Jokic, 2025-11-21 14:26:04 +01:00, committed by GitHub
commit ad709932fe
29 changed files with 1614 additions and 148 deletions

View File

@ -26,6 +26,29 @@ concurrency:
cancel-in-progress: true
jobs:
default-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run default setup test
run: hack/e2e-test.sh default-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
default-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -117,6 +140,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
single-namespace-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run single namespace setup test
run: hack/e2e-test.sh single-namespace-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
single-namespace-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -210,6 +256,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
dind-mode-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run dind mode setup test
run: hack/e2e-test.sh dind-mode-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
dind-mode-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -302,6 +371,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
kubernetes-mode-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run kubernetes mode setup test
run: hack/e2e-test.sh kubernetes-mode-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
kubernetes-mode-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -403,6 +495,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
auth-proxy-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run auth proxy setup test
run: hack/e2e-test.sh auth-proxy-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
auth-proxy-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -506,6 +621,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
anonymous-proxy-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run anonymous proxy setup test
run: hack/e2e-test.sh anonymous-proxy-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
anonymous-proxy-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -603,6 +741,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
self-signed-ca-setup-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run self signed CA setup test
run: hack/e2e-test.sh self-signed-ca-setup
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
self-signed-ca-setup:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -725,6 +886,29 @@ jobs:
arc-namespace: "arc-runners"
arc-controller-namespace: "arc-systems"
update-strategy-tests-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run update strategy test
run: hack/e2e-test.sh update-strategy
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
update-strategy-tests:
runs-on: ubuntu-latest
timeout-minutes: 20
@ -897,6 +1081,29 @@ jobs:
kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
init-with-min-runners-v2:
runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
steps:
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.E2E_TESTS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.E2E_TESTS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Run init with min runners test
run: hack/e2e-test.sh init-with-min-runners
env:
GITHUB_TOKEN: "${{steps.config-token.outputs.token}}"
shell: bash
init-with-min-runners:
runs-on: ubuntu-latest
timeout-minutes: 20
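
Every new *-v2 job in this workflow follows the same shape: check out the branch, mint an installation token from the E2E GitHub App, then hand it to hack/e2e-test.sh. A rough local equivalent, assuming a personal token with comparable scopes in place of the App-minted one (all values are placeholders):

    # Sketch only: the workflow mints its token via a GitHub App; a PAT with
    # comparable org/repo scopes is assumed here.
    export TARGET_ORG="<org>"        # the jobs read this from env.TARGET_ORG
    export TARGET_REPO="<repo>"
    export GITHUB_TOKEN="<token>"
    hack/e2e-test.sh default-setup   # or any other case name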

View File

@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.329.0
RUNNER_VERSION ?= 2.330.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@ -210,8 +210,6 @@ docker-buildx:
docker buildx create --platform ${PLATFORMS} --name container-builder --use;\
fi
docker buildx build --platform ${PLATFORMS} \
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
--build-arg VERSION=${VERSION} \
--build-arg COMMIT_SHA=${COMMIT_SHA} \
-t "${DOCKER_IMAGE_NAME}:${VERSION}" \
@ -297,6 +295,10 @@ acceptance/runner/startup:
e2e:
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
.PHONY: gha-e2e
gha-e2e:
bash hack/e2e-test.sh
# Upload release file to GitHub.
github-release: release
ghr ${VERSION} release/
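
The new gha-e2e target is a thin wrapper over the test driver, so the same environment contract applies. A hedged invocation sketch (values are placeholders):

    # Assumes the variables hack/e2e-test.sh checks for are already exported.
    export GITHUB_TOKEN="<token>" TARGET_ORG="<org>" TARGET_REPO="<repo>"
    make gha-e2e    # runs every *.test.sh case via hack/e2e-test.sh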

View File

@ -26,9 +26,8 @@ import (
)
const (
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestInterval = time.Millisecond * 250
autoscalingListenerTestGitHubToken = "gh_token"
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestInterval = time.Millisecond * 250
)
var _ = Describe("Test AutoScalingListener controller", func() {

View File

@ -48,7 +48,7 @@ const (
annotationKeyValuesHash = "actions.github.com/values-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetIDAnnotationKey = "runner-scale-set-id"
)
type UpdateStrategy string
@ -180,14 +180,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
scaleSetIDRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
if !ok {
// Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
}
if id, err := strconv.Atoi(scaleSetIdRaw); err != nil || id <= 0 {
if id, err := strconv.Atoi(scaleSetIDRaw); err != nil || id <= 0 {
log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.")
// something modified the scaleSetId. Try to create one
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
@ -403,7 +403,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
return ctrl.Result{}, err
}
runnerGroupId := 1
runnerGroupID := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
@ -411,14 +411,14 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
runnerGroupID = int(runnerGroup.ID)
}
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName)
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupID, autoscalingRunnerSet.Spec.RunnerScaleSetName)
if err != nil {
logger.Error(err, "Failed to get runner scale set from Actions service",
"runnerGroupId",
strconv.Itoa(runnerGroupId),
strconv.Itoa(runnerGroupID),
"runnerScaleSetName",
autoscalingRunnerSet.Spec.RunnerScaleSetName)
return ctrl.Result{}, err
@ -429,7 +429,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
ctx,
&actions.RunnerScaleSet{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
RunnerGroupId: runnerGroupId,
RunnerGroupId: runnerGroupID,
Labels: []actions.Label{
{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
@ -466,7 +466,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[runnerScaleSetIDAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
logger.Error(err, "Failed to apply GitHub URL labels")
@ -484,7 +484,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@ -496,7 +496,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
runnerGroupId := 1
runnerGroupID := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
@ -504,12 +504,12 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
runnerGroupId = int(runnerGroup.ID)
runnerGroupID = int(runnerGroup.ID)
}
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{RunnerGroupId: runnerGroupId})
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetID, &actions.RunnerScaleSet{RunnerGroupId: runnerGroupID})
if err != nil {
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId)
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetID)
return ctrl.Result{}, err
}
@ -527,7 +527,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@ -544,9 +544,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
return ctrl.Result{}, err
}
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Spec.RunnerScaleSetName})
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetID, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Spec.RunnerScaleSetName})
if err != nil {
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId)
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetID)
return ctrl.Result{}, err
}
@ -563,7 +563,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}
func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
scaleSetID, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
if !ok {
// Annotation not being present can occur in 3 scenarios
// 1. Scale set is never created.
@ -580,7 +580,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return nil
}
logger.Info("Deleting the runner scale set from Actions service")
runnerScaleSetId, err := strconv.Atoi(scaleSetId)
runnerScaleSetID, err := strconv.Atoi(scaleSetID)
if err != nil {
// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
// If the configuration is invalid (secret does not exist for example), we never got to the point to create runner set.
@ -595,17 +595,17 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return err
}
err = actionsClient.DeleteRunnerScaleSet(ctx, runnerScaleSetId)
err = actionsClient.DeleteRunnerScaleSet(ctx, runnerScaleSetID)
if err != nil {
logger.Error(err, "Failed to delete runner scale set", "runnerScaleSetId", runnerScaleSetId)
logger.Error(err, "Failed to delete runner scale set", "runnerScaleSetId", runnerScaleSetID)
return err
}
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
delete(obj.Annotations, runnerScaleSetIDAnnotationKey)
})
if err != nil {
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIDAnnotationKey)
return err
}
@ -1006,6 +1006,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {
list *v1alpha1.EphemeralRunnerSetList
sorted bool

View File

@ -34,9 +34,8 @@ import (
)
const (
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
)
var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
@ -141,7 +140,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", err
}
if _, ok := created.Annotations[runnerScaleSetIdAnnotationKey]; !ok {
if _, ok := created.Annotations[runnerScaleSetIDAnnotationKey]; !ok {
return "", nil
}
@ -149,7 +148,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", nil
}
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIDAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")

View File

@ -36,7 +36,8 @@ const (
LabelKeyGitHubRepository = "actions.github.com/repository"
)
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
// AutoscalingRunnerSetCleanupFinalizerName is a finalizer used to protect resources
// from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
const (

View File

@ -154,31 +154,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
addFinalizers := !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) || !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName)
if addFinalizers {
log.Info("Adding finalizers")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
}); err != nil {
log.Error(err, "Failed to update with finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
return ctrl.Result{}, nil
log.Info("Successfully added finalizers")
}
secret := new(corev1.Secret)
@ -703,7 +689,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.newEphemeralRunnerPod(runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")

View File

@ -32,9 +32,8 @@ import (
)
const (
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
)
func TestPrecomputedConstants(t *testing.T) {
@ -119,8 +118,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Consistently(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -153,8 +151,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -172,8 +169,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
if refetch {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
}
@ -215,8 +211,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -234,8 +229,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
if refetch {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
}
@ -253,8 +247,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -299,8 +292,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -326,8 +318,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -351,7 +342,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -378,7 +369,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -403,7 +394,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -429,7 +420,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -456,7 +447,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -481,7 +472,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -508,7 +499,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -545,7 +536,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// We should have 3 runners, and have no Succeeded ones
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -582,7 +573,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -607,7 +598,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -648,7 +639,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// We should have 1 runner up and pending
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -675,7 +666,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -702,7 +693,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -728,7 +719,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -772,8 +763,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
return -1, err
}
@ -799,7 +789,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -826,7 +816,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -853,7 +843,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -897,7 +887,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return err
}
@ -938,7 +928,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (bool, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return false, err
}
@ -1046,7 +1036,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList = new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -1208,7 +1198,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
Eventually(func(g Gomega) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners")
for _, runner := range runnerList.Items {
@ -1226,7 +1216,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
Eventually(
func(g Gomega) (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -1245,7 +1235,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
}
if refetch {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -1260,6 +1250,18 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err = k8sClient.Delete(ctx, ephemeralRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunnerSet")
Eventually(func(g Gomega) (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(0), "EphemeralRunners should be deleted")
// Assert that the proxy secret is deleted
Eventually(func(g Gomega) {
proxySecret := &corev1.Secret{}
@ -1343,7 +1345,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -1490,7 +1492,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
@ -1529,3 +1531,27 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
).Should(BeTrue(), "server was not called")
})
})
// listEphemeralRunnersAndRemoveFinalizers lists ephemeral runners and clears finalizers on terminating ones, since the ephemeral runner reconciler is not started in these tests
func listEphemeralRunnersAndRemoveFinalizers(ctx context.Context, k8sClient client.Client, list *v1alpha1.EphemeralRunnerList, namespace string) error {
err := k8sClient.List(ctx, list, client.InNamespace(namespace))
if err != nil {
return err
}
// The ephemeral runner reconciler is not running, so clear finalizers on terminating runners to let deletion complete, and keep only live runners in the result.
liveItems := make([]v1alpha1.EphemeralRunner, 0)
for _, item := range list.Items {
if !item.DeletionTimestamp.IsZero() {
if err := patch(ctx, k8sClient, &item, func(runner *v1alpha1.EphemeralRunner) {
runner.Finalizers = []string{}
}); err != nil {
return err
}
continue
}
liveItems = append(liveItems, item)
}
list.Items = liveItems
return nil
}
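
The helper exists because, with no EphemeralRunner reconciler running in these tests, deletion would otherwise hang on the finalizers forever. Outside the test suite the same unblocking can be done by hand; a kubectl sketch (the namespace is an assumption):

    # Hypothetical manual equivalent: clear finalizers on terminating runners.
    kubectl get ephemeralrunners -n arc-runners -o name | while read -r er; do
      kubectl patch "$er" -n arc-runners --type=merge \
        -p '{"metadata":{"finalizers":[]}}'
    done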

View File

@ -42,11 +42,11 @@ func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.N
ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)},
}
err := k8sClient.Create(context.Background(), ns)
err := client.Create(context.Background(), ns)
require.NoError(t, err)
t.Cleanup(func() {
err := k8sClient.Delete(context.Background(), ns)
err := client.Delete(context.Background(), ns)
require.NoError(t, err)
})

View File

@ -2,7 +2,6 @@ package actionsgithubcom
import (
"bytes"
"context"
"encoding/json"
"fmt"
"maps"
@ -83,7 +82,7 @@ func boolPtr(v bool) *bool {
}
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
if err != nil {
return nil, err
}
@ -125,7 +124,7 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
RunnerScaleSetId: runnerScaleSetId,
RunnerScaleSetId: runnerScaleSetID,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
@ -496,7 +495,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
}
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
if err != nil {
return nil, err
}
@ -541,7 +540,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
RunnerScaleSetId: runnerScaleSetId,
RunnerScaleSetId: runnerScaleSetID,
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
Proxy: autoscalingRunnerSet.Spec.Proxy,
@ -556,28 +555,23 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
labels[k] = "runner"
} else {
labels[k] = v
}
}
annotations := make(map[string]string)
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}
labels := make(map[string]string, len(ephemeralRunnerSet.Labels))
maps.Copy(labels, ephemeralRunnerSet.Labels)
labels[LabelKeyKubernetesComponent] = "runner"
annotations := make(map[string]string, len(ephemeralRunnerSet.Annotations)+1)
maps.Copy(annotations, ephemeralRunnerSet.Annotations)
annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: ephemeralRunnerSet.Name + "-runner-",
Namespace: ephemeralRunnerSet.Namespace,
Labels: labels,
Annotations: annotations,
Finalizers: []string{
ephemeralRunnerFinalizerName,
ephemeralRunnerActionsFinalizerName,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ephemeralRunnerSet.GetObjectKind().GroupVersionKind().GroupVersion().String(),
@ -593,27 +587,17 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *ResourceBuilder) newEphemeralRunnerPod(runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
var newPod corev1.Pod
labels := map[string]string{}
annotations := map[string]string{}
annotations := make(map[string]string, len(runner.Annotations)+len(runner.Spec.Annotations))
maps.Copy(annotations, runner.Annotations)
maps.Copy(annotations, runner.Spec.Annotations)
for k, v := range runner.Labels {
labels[k] = v
}
for k, v := range runner.Spec.Labels {
labels[k] = v
}
labels := make(map[string]string, len(runner.Labels)+len(runner.Spec.Labels)+2)
maps.Copy(labels, runner.Labels)
maps.Copy(labels, runner.Spec.Labels)
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.Annotations {
annotations[k] = v
}
for k, v := range runner.Spec.Annotations {
annotations[k] = v
}
labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
FilterLabels(labels, LabelKeyRunnerTemplateHash),
annotations,

View File

@ -1,7 +1,6 @@
package actionsgithubcom
import (
"context"
"fmt"
"strings"
"testing"
@ -28,7 +27,7 @@ func TestLabelPropagation(t *testing.T) {
"directly.excluded.org/arbitrary": "not-excluded-value",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
runnerScaleSetIDAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
@ -104,7 +103,7 @@ func TestLabelPropagation(t *testing.T) {
Name: "test",
},
}
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
pod := b.newEphemeralRunnerPod(ephemeralRunner, runnerSecret)
for key := range ephemeralRunner.Labels {
assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
}
@ -124,7 +123,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
runnerScaleSetIDAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
@ -190,7 +189,7 @@ func TestOwnershipRelationships(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
runnerScaleSetIDAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
annotationKeyValuesHash: "test-hash",
@ -233,7 +232,7 @@ func TestOwnershipRelationships(t *testing.T) {
Name: "test-secret",
},
}
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
pod := b.newEphemeralRunnerPod(ephemeralRunner, runnerSecret)
// Test EphemeralRunnerPod ownership
require.Len(t, pod.OwnerReferences, 1, "EphemeralRunnerPod should have exactly one owner reference")

hack/e2e-test.sh (new executable file, 93 lines)
View File

@ -0,0 +1,93 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
TEST_DIR="$(realpath "${DIR}/../test/actions.github.com")"
export PLATFORMS="linux/amd64"
TARGETS=()
function set_targets() {
local cases
cases="$(find "${TEST_DIR}" -name '*.test.sh' | sed "s#^${TEST_DIR}/##g")"
mapfile -t TARGETS < <(echo "${cases}")
echo "${TARGETS[@]}"
}
function env_test() {
if [[ -z "${GITHUB_TOKEN}" ]]; then
echo "Error: GITHUB_TOKEN is not set"
exit 1
fi
if [[ -z "${TARGET_ORG}" ]]; then
echo "Error: TARGET_ORG is not set"
exit 1
fi
if [[ -z "${TARGET_REPO}" ]]; then
echo "Error: TARGET_REPO is not set"
exit 1
fi
}
function usage() {
echo "Usage: $0 [test_name]"
echo " test_name: the name of the test to run"
echo " if not specified, all tests will be run"
echo " test_name should be the name of the test file without the .test.sh suffix"
echo ""
exit 1
}
function main() {
local failed=()
env_test
if [[ -z "${1}" ]]; then
echo "Running all tests"
set_targets
elif [[ -f "${TEST_DIR}/${1}.test.sh" ]]; then
echo "Running test ${1}"
TARGETS=("${1}.test.sh")
else
usage
fi
for target in "${TARGETS[@]}"; do
echo "============================================================"
test="${TEST_DIR}/${target}"
if [[ ! -x "${test}" ]]; then
echo "Error: test ${test} is not executable or not found"
failed+=("${test}")
continue
fi
echo "Running test ${target}"
if ! "${test}"; then
failed+=("${target}")
echo "---------------------------------"
echo "FAILED: ${target}"
else
echo "---------------------------------"
echo "PASSED: ${target}"
fi
echo "============================================================"
done
if [[ "${#failed[@]}" -gt 0 ]]; then
echo "Failed tests:"
for fail in "${failed[@]}"; do
echo " ${fail}"
done
exit 1
fi
}
main "$@"

View File

@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.329.0
RUNNER_VERSION ?= 2.330.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.8.0
DOCKER_VERSION ?= 28.0.4

View File

@ -1,2 +1,2 @@
RUNNER_VERSION=2.329.0
RUNNER_VERSION=2.330.0
RUNNER_CONTAINER_HOOKS_VERSION=0.8.0

View File

@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: squid
spec:
replicas: 1
selector:
matchLabels:
app: squid
template:
metadata:
labels:
app: squid
spec:
containers:
- name: squid
image: ubuntu/squid:latest
ports:
- containerPort: 3128
---
apiVersion: v1
kind: Service
metadata:
name: squid
spec:
selector:
app: squid
ports:
- protocol: TCP
port: 3128
targetPort: 3128
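
Before pointing a scale set at it, the anonymous proxy can be spot-checked from inside the cluster; the curl pod and image below are assumptions, not part of the manifest:

    kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
      -x http://squid.default.svc.cluster.local:3128 -sI https://github.com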

View File

@ -0,0 +1,87 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="anonymous-proxy-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_squid() {
echo "Starting squid-proxy"
kubectl apply -f "${DIR}/anonymous-proxy-setup.squid.yaml"
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://squid.default.svc.cluster.local:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_squid
install_scale_set || {
echo "Scale set installation failed"
NAMESPACE="${ARC_NAMESPACE}" log_arc
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: squid
spec:
replicas: 1
selector:
matchLabels:
app: squid
template:
metadata:
labels:
app: squid
spec:
containers:
- name: squid
image: huangtingluo/squid-proxy:latest
ports:
- containerPort: 3128
---
apiVersion: v1
kind: Service
metadata:
name: squid
spec:
selector:
app: squid
ports:
- protocol: TCP
port: 3128
targetPort: 3128

View File

@ -0,0 +1,102 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
install_openebs || {
echo "OpenEBS installation failed"
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_squid() {
echo "Starting squid-proxy"
kubectl apply -f "${DIR}/auth-proxy-setup.squid.yaml"
echo "Creating scale set namespace"
kubectl create namespace "${SCALE_SET_NAMESPACE}" || true
echo "Creating squid proxy secret"
kubectl create secret generic proxy-auth \
--namespace=arc-runners \
--from-literal=username=github \
--from-literal=password='actions'
}
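
The chart consumes this secret via proxy.https.credentialSecretRef below; the authenticated proxy itself can be spot-checked with the same username and password (the curl pod and image are assumptions):

    kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -- \
      -x http://github:actions@squid.default.svc.cluster.local:3128 -sI https://github.com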
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://squid.default.svc.cluster.local:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_squid
install_scale_set || {
echo "Scale set installation failed"
NAMESPACE="${ARC_NAMESPACE}" log_arc
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,73 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,74 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-dind-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="dind" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main
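
In dind mode the runner talks to a Docker daemon sidecar, so a green run implies docker works inside the runner pod. A manual spot check, with the pod name and container layout as assumptions:

    kubectl exec -n arc-runners <runner-pod> -c runner -- docker info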

View File

@ -0,0 +1,3 @@
export TARGET_ORG="org"
export TARGET_REPO="repo"
export GITHUB_TOKEN="token"
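
These are placeholder values; the assumed usage is to fill in real ones and source the file before running a test (the file's path is not shown in this diff):

    # File name assumed for illustration only.
    source <path-to-this-env-file>
    hack/e2e-test.sh default-setup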

View File

@ -0,0 +1,196 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(realpath "${DIR}/../..")"
export TARGET_ORG="${TARGET_ORG:-actions-runner-controller}"
export TARGET_REPO="${TARGET_REPO:-arc_e2e_test_dummy}"
export IMAGE_NAME="${IMAGE_NAME:-arc-test-image}"
export VERSION="${VERSION:-$(yq .version <"${ROOT_DIR}/charts/gha-runner-scale-set-controller/Chart.yaml")}"
export IMAGE_TAG="${VERSION}"
export IMAGE="${IMAGE_NAME}:${IMAGE_TAG}"
export PLATFORMS="linux/amd64"
COMMIT_SHA="$(git rev-parse HEAD)"
export COMMIT_SHA
function build_image() {
echo "Building ARC image ${IMAGE}"
cd "${ROOT_DIR}" || exit 1
docker buildx build --platform "${PLATFORMS}" \
--build-arg VERSION="${VERSION}" \
--build-arg COMMIT_SHA="${COMMIT_SHA}" \
-t "${IMAGE}" \
-f Dockerfile \
. --load
echo "Created image ${IMAGE}"
cd - || exit 1
}
function create_cluster() {
echo "Deleting minikube cluster if exists"
minikube delete || true
echo "Creating minikube cluster"
minikube start --driver=docker --container-runtime=docker --wait=all
echo "Verifying ns works"
if ! minikube ssh "nslookup github.com >/dev/null 2>&1"; then
echo "Nameserver configuration failed"
exit 1
fi
echo "Loading image into minikube cluster"
minikube image load "${IMAGE}"
echo "Loading runner image into minikube cluster"
minikube image load "ghcr.io/actions/actions-runner:latest"
}
function delete_cluster() {
echo "Deleting minikube cluster"
minikube delete
}
function log_arc() {
echo "ARC logs"
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
}
function wait_for_arc() {
echo "Waiting for ARC to be ready"
local count=0
while true; do
POD_NAME=$(kubectl get pods -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
return 1
fi
sleep 1
count=$((count + 1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
kubectl get pod -n "${NAMESPACE}"
kubectl describe deployment "${NAME}" -n "${NAMESPACE}"
}
function wait_for_scale_set() {
local count=0
while true; do
POD_NAME=$(kubectl get pods -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}" -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: ${POD_NAME}"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=${NAME}"
return 1
fi
sleep 1
count=$((count + 1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}"
kubectl get pod -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}"
}
function cleanup_scale_set() {
helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}" --debug
kubectl wait --timeout=40s --for=delete autoscalingrunnersets -n "${NAMESPACE}" -l app.kubernetes.io/instance="${INSTALLATION_NAME}"
}
function print_results() {
local failed=("$@")
if [[ "${#failed[@]}" -ne 0 ]]; then
echo "----------------------------------"
echo "The following tests failed:"
for test in "${failed[@]}"; do
echo " - ${test}"
done
return 1
else
echo "----------------------------------"
echo "All tests passed!"
fi
}
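# Dispatches WORKFLOW_FILE on the main ref with arc_name=SCALE_SET_NAME, then
# identifies the resulting run by listing runs created after a recorded UTC
# timestamp whose name contains SCALE_SET_NAME (the workflow is assumed to
# embed its arc_name input in the run name).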
function run_workflow() {
echo "Checking if the workflow file exists"
gh workflow view -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" || return 1
local queue_time
queue_time="$(date -u +%FT%TZ)"
echo "Running workflow ${WORKFLOW_FILE}"
gh workflow run -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" --ref main -f arc_name="${SCALE_SET_NAME}" || return 1
echo "Waiting for run to start"
local count=0
local run_id=
while true; do
if [[ "${count}" -ge 12 ]]; then
echo "Timeout waiting for run to start"
return 1
fi
run_id=$(gh run list -R "${TARGET_ORG}/${TARGET_REPO}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json "name,databaseId" --jq ".[] | select(.name | contains(\"${SCALE_SET_NAME}\")) | .databaseId")
echo "Run ID: ${run_id}"
if [ -n "$run_id" ]; then
echo "Run found!"
break
fi
echo "Run not found yet, waiting 5 seconds"
sleep 5
count=$((count + 1))
done
echo "Waiting for run to complete"
local code=0
# don't capture via command substitution: the watch output is discarded, so
# only the exit status is meaningful here
gh run watch "${run_id}" -R "${TARGET_ORG}/${TARGET_REPO}" --exit-status &>/dev/null || code=$?
if [[ "${code}" -ne 0 ]]; then
echo "Run failed with exit code ${code}"
return 1
fi
echo "Run completed successfully"
}
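# Usage: retry <attempts> <delay-seconds> <command...>; re-runs the command
# until it succeeds, sleeping <delay-seconds> between attempts, and returns 1
# after <attempts> failures.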
function retry() {
local retries=$1
shift
local delay=$1
shift
local n=1
until "$@"; do
if [[ $n -ge $retries ]]; then
echo "Attempt $n failed! No more retries left."
return 1
else
echo "Attempt $n failed! Retrying in $delay seconds..."
sleep "$delay"
n=$((n + 1))
fi
done
}
function install_openebs() {
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/openebs
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
}

View File

@ -0,0 +1,94 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
SCALE_SET_NAME="init-min-runners-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.updateStrategy="eventual" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set minRunners=5 \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
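# The scale set above is installed with minRunners=5, so five idle runner pods
# should appear without any workflow being queued; poll for up to 30s.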
function assert_5_runners() {
echo "[*] Asserting 5 runners are created"
local count=0
while true; do
pod_count=$(kubectl get pods -n "${SCALE_SET_NAMESPACE}" --no-headers | wc -l)
if [[ "${pod_count}" -eq 5 ]]; then
echo "[*] Found 5 runners as expected"
break
fi
if [[ "$count" -ge 30 ]]; then
echo "Timeout waiting for 5 pods to be created"
return 1
fi
sleep 1
count=$((count + 1))
done
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
assert_5_runners || failed+=("assert_5_runners")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,81 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="kubernetes-mode-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-kubernetes-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
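# Kubernetes container mode runs job containers in separate pods via the
# runner container hooks and needs a PVC-backed work volume shared between
# them; OpenEBS's dynamic hostpath provisioner is installed first to supply
# the openebs-hostpath storage class requested below.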
function install_arc() {
install_openebs || {
echo "OpenEBS installation failed"
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,29 @@
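# A minimal mitmdump pod plus Service: an HTTPS-intercepting forward proxy on
# port 8080. The self-signed-CA test later copies the CA certificate that
# mitmproxy generates under /root/.mitmproxy out of this pod and makes the
# runners trust it.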
apiVersion: v1
kind: Pod
metadata:
name: mitmproxy
namespace: mitmproxy
labels:
app: mitmproxy
spec:
containers:
- name: mitmproxy
image: mitmproxy/mitmproxy:latest
command: ["mitmdump"]
ports:
- containerPort: 8080
name: proxy
---
apiVersion: v1
kind: Service
metadata:
name: mitmproxy
namespace: mitmproxy
spec:
selector:
app: mitmproxy
ports:
- port: 8080
targetPort: 8080
name: proxy

View File

@ -0,0 +1,148 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
TEMP_DIR=$(mktemp -d)
LOCAL_CERT_PATH="${TEMP_DIR}/mitmproxy-ca-cert.crt"
MITM_CERT_PATH="/root/.mitmproxy/mitmproxy-ca-cert.pem"
trap 'rm -rf "$TEMP_DIR"' EXIT
SCALE_SET_NAME="self-signed-crt-$(date '+%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
MITMPROXY_NAMESPACE="mitmproxy"
MITMPROXY_POD_NAME="mitmproxy"
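# Test flow: the scale set's GitHub traffic is routed through mitmproxy, which
# re-signs TLS with its own CA; that CA is mounted into the runners via the
# githubServerTLS values below. 10.96.0.1:443 is excluded from proxying (it is
# assumed to be the in-cluster Kubernetes API service address on a default
# minikube cluster).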
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ca-cert config map"
kubectl -n "${SCALE_SET_NAMESPACE}" create configmap ca-cert \
--from-file=mitmproxy-ca-cert.crt="${LOCAL_CERT_PATH}"
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://mitmproxy.mitmproxy.svc.cluster.local:8080" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function wait_for_mitmproxy_ready() {
echo "Waiting for mitmproxy pod to be ready"
# Wait for the pod to become Ready
if ! kubectl wait --for=condition=ready pod -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" --timeout=60s; then
echo "Timeout waiting for mitmproxy pod"
kubectl get pods -n "${MITMPROXY_NAMESPACE}" || true
kubectl describe pod -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" || true
kubectl logs -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" || true
return 1
fi
echo "Mitmproxy pod is ready, trying to copy the certitficate..."
# Verify certificate exists
retry 15 1 kubectl exec -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" -- test -f "${MITM_CERT_PATH}"
echo "Getting mitmproxy CA certificate from pod"
if ! kubectl exec -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" -- cat "${MITM_CERT_PATH}" >"${LOCAL_CERT_PATH}"; then
echo "Failed to get mitmproxy CA certificate from pod"
return 1
fi
echo "Mitmproxy certificate generated successfully and stored to ${LOCAL_CERT_PATH}"
return 0
}
function run_mitmproxy() {
echo "Deploying mitmproxy to Kubernetes"
# Create namespace
kubectl create namespace "${MITMPROXY_NAMESPACE}" || true
# Create mitmproxy pod and service
kubectl apply -f "${DIR}/self-signed-ca-setup.mitm.yaml"
if ! wait_for_mitmproxy_ready; then
return 1
fi
echo "Mitmproxy is ready"
}
function main() {
local failed=()
build_image
create_cluster
install_arc
run_mitmproxy || {
echo "Failed to run mitmproxy"
echo "ARC logs:"
NAMESPACE="${ARC_NAMESPACE}" log_arc
echo "Deleting cluster..."
delete_cluster
exit 1
}
install_scale_set || {
echo "Failed to run mitmproxy"
echo "ARC logs:"
NAMESPACE="${ARC_NAMESPACE}" log_arc
echo "Deleting cluster..."
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,74 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="${SCALE_SET_NAMESPACE}"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.watchSingleNamespace="${ARC_NAMESPACE}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -0,0 +1,146 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
SCALE_SET_NAME="update-strategy-$(date '+%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-sleepy-matrix.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
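# Exercises flags.updateStrategy=eventual: a slow matrix workflow is started,
# the scale set is then upgraded mid-flight, and the assertions below verify
# the listener teardown/recreate sequence that strategy implies.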
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.updateStrategy="eventual" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function upgrade_scale_set() {
echo "Upgrading scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm upgrade "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set "template.spec.containers[0].name=runner" \
--set "template.spec.containers[0].image=ghcr.io/actions/actions-runner:latest" \
--set "template.spec.containers[0].command={/home/runner/run.sh}" \
--set "template.spec.containers[0].env[0].name=TEST" \
--set "template.spec.containers[0].env[0].value=E2E TESTS" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
}
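# Under the eventual update strategy the listener should first be deleted
# while the in-flight matrix runners keep working, and a new listener should
# only come up once they drain. Both assertions poll kubectl once per second
# (60s and 120s budgets respectively).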
function assert_listener_deleted() {
local count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 0 ]; then
echo "Listener has been deleted"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 60 ]; then
echo "Timeout waiting for listener to be deleted"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be deleted"
echo "Listener count: ${LISTENER_COUNT} target: 0 | Runners count: ${RUNNERS_COUNT} target: 3"
sleep 1
count=$((count + 1))
done
}
function assert_listener_recreated() {
local count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 1 ]; then
echo "Listener is up!"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 120 ]; then
echo "Timeout waiting for listener to be recreated"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be recreated"
echo "Listener count: ${LISTENER_COUNT} target: 1 | Runners count: ${RUNNERS_COUNT} target: 0"
sleep 1
count=$((count + 1))
done
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
upgrade_scale_set || failed+=("upgrade_scale_set")
assert_listener_deleted || failed+=("assert_listener_deleted")
assert_listener_recreated || failed+=("assert_listener_recreated")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@ -36,7 +36,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.329.0"
RunnerVersion = "2.330.0"
RunnerContainerHooksVersion = "0.8.0"
)