Merge branch 'actions:master' into main

Commit 81b8f8bda8 by sravula84, 2024-09-19 14:01:54 -07:00, committed by GitHub
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
53 changed files with 1301 additions and 550 deletions

View File

@ -47,7 +47,7 @@ runs:
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v6
uses: actions/github-script@v7
id: query_workflow
with:
script: |
@ -128,7 +128,7 @@ runs:
- name: Wait for workflow to start running
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
script: |
function sleep(ms) {
@ -156,7 +156,7 @@ runs:
- name: Wait for workflow to finish successfully
if: inputs.wait-to-finish == 'true'
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
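Both waiting steps above poll the triggered workflow run until it reports the expected status. A minimal Go sketch of the same deadline-bounded polling pattern (the pollUntil name, timings, and check callback are illustrative, not the action's actual API):

package sketch

import (
	"fmt"
	"time"
)

// pollUntil re-runs check every interval until it reports done,
// giving up with an error once the deadline passes.
func pollUntil(timeout, interval time.Duration, check func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("run did not finish within %s", timeout)
		}
		time.Sleep(interval)
	}
}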

View File

@ -27,7 +27,7 @@ runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@ -36,7 +36,7 @@ runs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
file: Dockerfile
platforms: linux/amd64
@ -56,7 +56,7 @@ runs:
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}

View File

@ -24,23 +24,23 @@ runs:
shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

View File

@ -40,12 +40,12 @@ jobs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v3.4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@ -58,7 +58,7 @@ jobs:
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
@ -134,7 +134,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@ -145,7 +145,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@ -184,7 +184,7 @@ jobs:
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}

View File

@ -39,9 +39,9 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
@ -73,7 +73,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@ -28,7 +28,7 @@ jobs:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Get runner version
id: versions
run: |
@ -39,7 +39,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@ -21,7 +21,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Get runner current and latest versions
id: runner_versions
@ -64,7 +64,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: PR Name
id: pr_name
@ -119,7 +119,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

View File

@ -40,13 +40,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@ -67,7 +67,7 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'

View File

@ -24,7 +24,7 @@ jobs:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Run tests
run: |

View File

@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.9.1"
IMAGE_VERSION: "0.9.3"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@ -33,7 +33,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -122,7 +122,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -213,7 +213,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -303,7 +303,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -402,7 +402,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -503,7 +503,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -598,7 +598,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -718,7 +718,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@ -888,7 +888,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}

View File

@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@ -72,10 +72,10 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@ -84,14 +84,14 @@ jobs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
@ -121,7 +121,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@ -140,8 +140,8 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@ -169,7 +169,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@ -188,8 +188,8 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}

View File

@ -36,13 +36,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@ -63,7 +63,7 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
@ -84,13 +84,13 @@ jobs:
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile

View File

@ -55,11 +55,11 @@ jobs:
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@ -90,10 +90,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
@ -110,16 +110,16 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile

View File

@ -25,10 +25,10 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version-file: go.mod

View File

@ -11,7 +11,7 @@ jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -29,8 +29,8 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
@ -42,13 +42,13 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v6
with:
only-new-issues: true
version: v1.55.2
@ -56,8 +56,8 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
@ -69,8 +69,8 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- run: make manifests

View File

@ -1,7 +1,9 @@
run:
timeout: 3m
output:
format: github-actions
formats:
- format: github-actions
path: stdout
linters-settings:
errcheck:
exclude-functions:

View File

@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
FROM --platform=$BUILDPLATFORM golang:1.22.4 as builder
WORKDIR /workspace

View File

@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.315.0
RUNNER_VERSION ?= 2.319.1
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@ -68,7 +68,7 @@ endif
all: manager
lint:
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
GO_TEST_ARGS ?= -short
@ -310,7 +310,7 @@ github-release: release
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Note that controller-gen newer than 0.6.1 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen:
ifeq (, $(shell which controller-gen))

View File

@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
version: 0.9.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.1"
appVersion: "0.9.3"
home: https://github.com/actions/actions-runner-controller

View File

@ -126,7 +126,3 @@ Create the name of the service account to use
{{- end }}
{{- $names | join ","}}
{{- end }}
{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
{{- end }}

View File

@ -79,6 +79,9 @@ spec:
- "--listener-metrics-endpoint="
- "--metrics-addr=0"
{{- end }}
{{- range .Values.flags.excludeLabelPropagationPrefixes }}
- "--exclude-label-propagation-prefix={{ . }}"
{{- end }}
command:
- "/manager"
{{- with .Values.metrics }}

View File

@ -1035,3 +1035,41 @@ func TestControllerDeployment_MetricsPorts(t *testing.T) {
assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
}
}
func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.excludeLabelPropagationPrefixes[0]": "prefix.com/",
"flags.excludeLabelPropagationPrefixes[1]": "complete.io/label",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
container := deployment.Spec.Template.Spec.Containers[0]
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}

View File

@ -119,3 +119,12 @@ flags:
## This can lead to a longer time to apply the change but it will ensure
## that you don't have any overprovisioning of runners.
updateStrategy: "immediate"
## Defines a list of label-key prefixes that should not be propagated to internal resources.
## This is useful when labels serve external tooling and should not be copied to the resources the controller creates.
## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
##
## By default, all labels are propagated to internal resources.
## Labels whose keys match a prefix in the list are excluded from propagation.
# excludeLabelPropagationPrefixes:
# - "argocd.argoproj.io/instance"

View File

@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
version: 0.9.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.1"
appVersion: "0.9.3"
home: https://github.com/actions/actions-runner-controller

View File

@ -295,8 +295,23 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
func (l *Listener) deleteLastMessage(ctx context.Context) error {
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
return fmt.Errorf("failed to delete message: %w", err)
err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err == nil { // deleted successfully on the first try
return nil
}
expiredError := &actions.MessageQueueTokenExpiredError{}
if !errors.As(err, &expiredError) {
return fmt.Errorf("failed to delete last message: %w", err)
}
if err := l.refreshSession(ctx); err != nil {
return err
}
err = l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err != nil {
return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
}
return nil
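The rewritten deleteLastMessage above follows a refresh-and-retry-once pattern: if the first delete fails with a MessageQueueTokenExpiredError, it refreshes the session and retries exactly once. A generic sketch of that pattern (the callback signatures are illustrative, not the listener's API):

package sketch

import "fmt"

// retryOnceAfterRefresh runs do; if it fails with an expired-credential
// error, it refreshes the session and retries exactly once.
func retryOnceAfterRefresh(do func() error, isExpired func(error) bool, refresh func() error) error {
	err := do()
	if err == nil {
		return nil
	}
	if !isExpired(err) {
		return fmt.Errorf("failed on first attempt: %w", err)
	}
	if err := refresh(); err != nil {
		return err
	}
	if err := do(); err != nil {
		return fmt.Errorf("failed after session refresh: %w", err)
	}
	return nil
}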

View File

@ -377,6 +377,93 @@ func TestListener_deleteLastMessage(t *testing.T) {
err = l.deleteLastMessage(ctx)
assert.NotNil(t, err)
})
t.Run("RefreshAndSucceeds", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool {
return lastMessageID.(int64) == int64(5)
})).Return(nil).Once()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.lastMessageID = 5
config.Client = client
err = l.deleteLastMessage(ctx)
assert.NoError(t, err)
})
t.Run("RefreshAndFails", func(t *testing.T) {
t.Parallel()
ctx := context.Background()
config := Config{
ScaleSetID: 1,
Metrics: metrics.Discard,
}
client := listenermocks.NewClient(t)
newUUID := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &newUUID,
OwnerName: "example",
RunnerScaleSet: &actions.RunnerScaleSet{},
MessageQueueUrl: "https://example.com",
MessageQueueAccessToken: "1234567890",
Statistics: nil,
}
client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice()
config.Client = client
l, err := New(config)
require.Nil(t, err)
oldUUID := uuid.New()
l.session = &actions.RunnerScaleSetSession{
SessionId: &oldUUID,
RunnerScaleSet: &actions.RunnerScaleSet{},
}
l.lastMessageID = 5
config.Client = client
err = l.deleteLastMessage(ctx)
assert.Error(t, err)
})
}
func TestListener_Listen(t *testing.T) {

View File

@ -41,7 +41,7 @@ type Worker struct {
clientset *kubernetes.Clientset
config Config
lastPatch int
lastPatchID int
patchSeq int
logger *logr.Logger
}
@ -51,7 +51,7 @@ func New(config Config, options ...Option) (*Worker, error) {
w := &Worker{
config: config,
lastPatch: -1,
lastPatchID: -1,
patchSeq: -1,
}
conf, err := rest.InClusterConfig()
@ -163,27 +163,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
// The function then scales the ephemeral runner set by applying the merge patch.
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
logValues := []any{
"assigned job", count,
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
}
if count == 0 && jobsCompleted == 0 {
w.lastPatchID = 0
} else {
w.lastPatchID++
}
w.lastPatch = targetRunnerCount
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
patchID := w.setDesiredWorkerState(count, jobsCompleted)
original, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
@ -200,8 +181,8 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
patch, err := json.Marshal(
&v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: targetRunnerCount,
PatchID: w.lastPatchID,
Replicas: w.lastPatch,
PatchID: patchID,
},
},
)
@ -210,14 +191,13 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
return 0, err
}
w.logger.Info("Compare", "original", string(original), "patch", string(patch))
mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
if err != nil {
return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
}
w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
w.logger.Info("Scaling ephemeral runner set", logValues...)
w.logger.Info("Preparing EphemeralRunnerSet update", "json", string(mergePatch))
patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
err = w.clientset.RESTClient().
@ -238,5 +218,40 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
"name", w.config.EphemeralRunnerSetName,
"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
)
return targetRunnerCount, nil
return w.lastPatch, nil
}
// setDesiredWorkerState calculates the desired state of the worker based on the desired count and the number of jobs completed.
func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
w.patchSeq++
desiredPatchID := w.patchSeq
if count == 0 && jobsCompleted == 0 { // empty batch
targetRunnerCount = max(w.lastPatch, targetRunnerCount)
if targetRunnerCount == w.config.MinRunners {
// We have an empty batch and the last patch was at the min runners,
// so all runners should be idle.
// If the controller accidentally created a few extra pods (during
// scale-down events), forcing patch ID 0 here lets it scale back
// down to the min runners.
// The patch sequence still keeps increasing so a batch is never ignored.
desiredPatchID = 0
}
}
w.lastPatch = targetRunnerCount
w.logger.Info(
"Calculated target runner count",
"assigned job", count,
"decision", targetRunnerCount,
"min", w.config.MinRunners,
"max", w.config.MaxRunners,
"currentRunnerCount", w.lastPatch,
"jobsCompleted", jobsCompleted,
)
return desiredPatchID
}
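An illustrative call sequence, assuming MinRunners=1 and MaxRunners=math.MaxInt32 (the outcomes mirror the unit tests in the next file):

logger := logr.Discard()
w := &Worker{
	config:    Config{MinRunners: 1, MaxRunners: math.MaxInt32},
	lastPatch: -1,
	patchSeq:  -1,
	logger:    &logger,
}
_ = w.setDesiredWorkerState(2, 0) // returns patch ID 0: scale to min+2 = 3
_ = w.setDesiredWorkerState(0, 1) // returns patch ID 1: jobs done, back to min = 1
_ = w.setDesiredWorkerState(0, 0) // returns patch ID 0 (forced): empty batch at min runners, patchSeq still advances to 2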

View File

@ -0,0 +1,326 @@
package worker
import (
"math"
"testing"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
)
func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 0,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("init calculate with acquired 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("init calculate with acquired 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
assert.Equal(t, 0, patchID)
})
t.Run("increment patch when job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("increment patch when called with same parameters", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(1, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 1)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 1,
MaxRunners: math.MaxInt32,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 4, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 0,
MaxRunners: 5,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 2, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("request back to 0 on job done", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
w.setDesiredWorkerState(5, 0)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count > max and completed > 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(1, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(6, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 5, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale back to 0 when count was > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(6, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 0, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}
func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) {
logger := logr.Discard()
newEmptyWorker := func() *Worker {
return &Worker{
config: Config{
MinRunners: 1,
MaxRunners: 3,
},
lastPatch: -1,
patchSeq: -1,
logger: &logger,
}
}
t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 1, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale to min when count == 0", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(2, 0)
assert.Equal(t, 0, patchID)
patchID = w.setDesiredWorkerState(0, 1)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
})
t.Run("scale up to max when count > max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(4, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("scale to max when count == max", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
})
t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) {
w := newEmptyWorker()
patchID := w.setDesiredWorkerState(3, 0)
assert.Equal(t, 0, patchID)
assert.Equal(t, 3, w.lastPatch)
assert.Equal(t, 0, w.patchSeq)
patchID = w.setDesiredWorkerState(0, 3)
assert.Equal(t, 1, patchID)
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 1, w.patchSeq)
// Empty batch on min runners
patchID = w.setDesiredWorkerState(0, 0)
assert.Equal(t, 0, patchID) // forcing the state
assert.Equal(t, 1, w.lastPatch)
assert.Equal(t, 2, w.patchSeq)
})
}

View File

@ -55,7 +55,7 @@ type AutoscalingListenerReconciler struct {
ListenerMetricsAddr string
ListenerMetricsEndpoint string
resourceBuilder resourceBuilder
ResourceBuilder
}
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
@ -242,17 +242,27 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
}
// A failed listener pod might mean the mirror secret is out of date
// Delete the listener pod and re-create it to make sure the mirror secret is up to date
if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
cs := listenerContainerStatus(listenerPod)
switch {
case cs == nil:
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, nil
case cs.State.Terminated != nil:
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
}
if listenerPod.DeletionTimestamp.IsZero() {
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
}
if listenerPod.Status.Phase == corev1.PodRunning {
return ctrl.Result{}, nil
case cs.State.Running != nil:
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// stop reconciling. We should never get to this point but if we do,
@ -260,8 +270,8 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// notify the reconciler again.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
@ -373,7 +383,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err
@ -458,7 +468,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret")
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
@ -477,7 +487,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@ -537,7 +547,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
}
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
return ctrl.Result{}, err
@ -609,7 +619,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil {
@ -637,7 +647,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace,
@ -690,30 +700,6 @@ func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListen
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
@ -746,3 +732,13 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
for i := range pod.Status.ContainerStatuses {
cs := &pod.Status.ContainerStatuses[i]
if cs.Name == autoscalingListenerContainerName {
return cs
}
}
return nil
}

View File

@ -21,11 +21,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)
const (
autoscalingListenerTestTimeout = time.Second * 5
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestInterval = time.Millisecond * 250
autoscalingListenerTestGitHubToken = "gh_token"
)
@ -34,9 +34,9 @@ var _ = Describe("Test AutoScalingListener controller", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
var autoscalingListener *v1alpha1.AutoscalingListener
BeforeEach(func() {
ctx = context.Background()
@ -53,12 +53,12 @@ var _ = Describe("Test AutoScalingListener controller", func() {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
@ -79,12 +79,12 @@ var _ = Describe("Test AutoScalingListener controller", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asl",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1,
@ -119,7 +119,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
).Should(Succeed(), "Config secret should be created")
// Check if finalizer is added
created := new(actionsv1alpha1.AutoscalingListener)
created := new(v1alpha1.AutoscalingListener)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created)
@ -298,7 +298,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
// The AutoScalingListener should be deleted
Eventually(
func() error {
listenerList := new(actionsv1alpha1.AutoscalingListenerList)
listenerList := new(v1alpha1.AutoscalingListenerList)
err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name})
if err != nil {
return err
@ -351,6 +351,53 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
})
It("It should re-create pod whenever listener container is terminated", func() {
// Wait for the pod to be created
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
oldPodUID := string(pod.UID)
updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod")
// Wait for the new pod to be created
Eventually(
func() (string, error) {
pod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
})
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
// Wait for the pod to be created
pod := new(corev1.Pod)
@ -373,7 +420,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
updatedPod := pod.DeepCopy()
updatedPod.Status.Phase = corev1.PodFailed
// Leave the pod phase as Running; the controller should consult the container state instead
updatedPod.Status.Phase = corev1.PodRunning
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
}
err = k8sClient.Status().Update(ctx, updatedPod)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
@ -415,11 +473,12 @@ var _ = Describe("Test AutoScalingListener customization", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
var autoscalingListener *v1alpha1.AutoscalingListener
var runAsUser int64 = 1001
const sidecarContainerName = "sidecar"
listenerPodTemplate := corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
@ -432,7 +491,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
},
},
{
Name: "sidecar",
Name: sidecarContainerName,
ImagePullPolicy: corev1.PullIfNotPresent,
Image: "busybox",
},
@ -458,12 +517,12 @@ var _ = Describe("Test AutoScalingListener customization", func() {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
@ -484,12 +543,12 @@ var _ = Describe("Test AutoScalingListener customization", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asltest",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1,
@ -512,7 +571,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Context("When creating a new AutoScalingListener", func() {
It("It should create customized pod with applied configuration", func() {
// Check if finalizer is added
created := new(actionsv1alpha1.AutoscalingListener)
created := new(v1alpha1.AutoscalingListener)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created)
@ -525,7 +584,8 @@ var _ = Describe("Test AutoScalingListener customization", func() {
return created.Finalizers[0], nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if config is created
config := new(corev1.Secret)
@ -559,30 +619,119 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
Expect(pod.Spec.Containers[1].Name).To(Equal("sidecar"), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Name).To(Equal(sidecarContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
})
})
Context("When AutoscalingListener pod has interuptions", func() {
It("Should re-create pod when it is deleted", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
Expect(len(pod.Spec.Containers)).To(Equal(2), "Pod should have 2 containers")
oldPodUID := string(pod.UID)
err := k8sClient.Delete(ctx, pod)
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
})
It("Should re-create pod when the listener pod is terminated", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
updated := pod.DeepCopy()
oldPodUID := string(pod.UID)
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
{
Name: sidecarContainerName,
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created with a new UID")
})
})
})
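// Both interruption tests above assert the same reconciler behavior from two
// angles: whether the listener pod is deleted outright or its listener
// container terminates, the controller should end up with a replacement pod
// carrying a new UID. A minimal sketch of the kind of check involved,
// assuming a hypothetical helper name (the real controller logic may differ):
//
//	func listenerPodNeedsRecreate(pod *corev1.Pod) bool {
//		for _, cs := range pod.Status.ContainerStatuses {
//			// The tests terminate the listener container with a non-zero exit code.
//			if cs.Name == autoscalingListenerContainerName && cs.State.Terminated != nil {
//				return true
//			}
//		}
//		return false
//	}
//
// Deleting the pod and re-running the normal create path is why both tests
// compare pod UIDs rather than pod names.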
var _ = Describe("Test AutoScalingListener controller with proxy", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
var autoscalingListener *v1alpha1.AutoscalingListener
createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) {
createRunnerSetAndListener := func(proxy *v1alpha1.ProxyConfig) {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
@ -604,12 +753,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
err := k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asl",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1,
@ -658,12 +807,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
err := k8sClient.Create(ctx, proxyCredentials)
Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret")
proxy := &actionsv1alpha1.ProxyConfig{
HTTP: &actionsv1alpha1.ProxyServerConfig{
proxy := &v1alpha1.ProxyConfig{
HTTP: &v1alpha1.ProxyServerConfig{
Url: "http://localhost:8080",
CredentialSecretRef: "proxy-credentials",
},
HTTPS: &actionsv1alpha1.ProxyServerConfig{
HTTPS: &v1alpha1.ProxyServerConfig{
Url: "https://localhost:8443",
CredentialSecretRef: "proxy-credentials",
},
@ -766,19 +915,19 @@ var _ = Describe("Test AutoScalingListener controller with template modification
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
var autoscalingListener *v1alpha1.AutoscalingListener
createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
MaxRunners: &max,
@ -800,12 +949,12 @@ var _ = Describe("Test AutoScalingListener controller with template modification
err := k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asl",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 1,
@ -887,7 +1036,6 @@ var _ = Describe("Test AutoScalingListener controller with template modification
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")
@ -915,9 +1063,9 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret
var autoscalingListener *actionsv1alpha1.AutoscalingListener
var autoscalingListener *v1alpha1.AutoscalingListener
var rootCAConfigMap *corev1.ConfigMap
BeforeEach(func() {
@ -955,16 +1103,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
min := 1
max := 10
autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{
CertificateFrom: &actionsv1alpha1.TLSCertificateSource{
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: rootCAConfigMap.Name,
@ -991,16 +1139,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
err = k8sClient.Create(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
autoscalingListener = &actionsv1alpha1.AutoscalingListener{
autoscalingListener = &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asl",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{
CertificateFrom: &actionsv1alpha1.TLSCertificateSource{
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: rootCAConfigMap.Name,

View File

@ -30,7 +30,6 @@ import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
@ -80,8 +79,7 @@ type AutoscalingRunnerSetReconciler struct {
DefaultRunnerScaleSetListenerImagePullSecrets []string
UpdateStrategy UpdateStrategy
ActionsClient actions.MultiClient
resourceBuilder resourceBuilder
ResourceBuilder
}
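// Embedding ResourceBuilder (instead of keeping an unexported resourceBuilder
// field) promotes the builder's methods onto the reconciler and makes the
// zero value usable without extra wiring. A minimal illustration of method
// promotion, with purely hypothetical names:
//
//	type builder struct{}
//
//	func (builder) newThing() string { return "thing" }
//
//	type reconciler struct{ builder } // embedded: methods are promoted
//
//	func example() {
//		var r reconciler
//		_ = r.newThing() // identical to r.builder.newThing()
//	}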
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
@ -135,17 +133,11 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err
}
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
if err != nil {
if err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log); err != nil {
log.Error(err, "Failed to remove finalizers on dependent resources")
return ctrl.Result{}, err
}
if requeue {
log.Info("Waiting for dependent resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
log.Info("Removing finalizer")
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@ -390,7 +382,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil
}
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
@ -405,12 +397,7 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)
requeue, err = c.result()
if err != nil {
logger.Error(err, "Failed to cleanup finalizer from dependent resource")
return true, err
}
return requeue, nil
return c.Err()
}
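// With the requeue flag gone, the cleaner is a plain error-short-circuit
// pipeline: each remove* method becomes a no-op once c.err is set, so the
// first failure wins and surfaces through Err(). A minimal sketch of the
// pattern, with illustrative names only:
//
//	type cleaner struct{ err error }
//
//	func (c *cleaner) step(f func() error) {
//		if c.err != nil {
//			return // an earlier step failed; skip the rest
//		}
//		c.err = f()
//	}
//
//	func (c *cleaner) Err() error { return c.err }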
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
@ -635,7 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@ -664,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}
autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@ -759,26 +746,6 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Con
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingRunnerSet{}).
Owns(&v1alpha1.EphemeralRunnerSet{}).
@ -805,17 +772,16 @@ type autoscalingRunnerSetFinalizerDependencyCleaner struct {
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger
// fields to operate on
requeue bool
err error
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
return c.requeue, c.err
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) Err() error {
return c.err
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
c.logger.Info("Skipping cleaning up kubernetes mode service account")
return
}
@ -846,7 +812,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@ -859,7 +824,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -889,7 +854,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
@ -902,7 +866,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -933,7 +897,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
@ -946,7 +909,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -977,7 +940,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
@ -990,7 +952,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -1021,7 +983,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
@ -1034,7 +995,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -1065,7 +1026,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@ -1078,7 +1038,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@ -1109,7 +1069,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):

View File

@ -34,7 +34,7 @@ import (
)
const (
autoscalingRunnerSetTestTimeout = time.Second * 5
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
)

View File

@ -52,7 +52,7 @@ type EphemeralRunnerReconciler struct {
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
resourceBuilder resourceBuilder
ResourceBuilder
}
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
@ -149,6 +149,19 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
}); err != nil {
log.Error(err, "Failed to update with finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@ -160,18 +173,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
log.Info("Successfully added runner registration finalizer")
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
}); err != nil {
log.Error(err, "Failed to update with finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}
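// Note the reordering above: ephemeralRunnerFinalizerName is now added first
// (returning early), and ephemeralRunnerActionsFinalizerName is appended on a
// later reconcile, so the Finalizers slice ends up as
// [ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName], which
// is the order the updated EphemeralRunner test asserts.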
@ -521,6 +522,14 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
Name: ephemeralRunner.Name,
}
for i := range ephemeralRunner.Spec.Spec.Containers {
if ephemeralRunner.Spec.Spec.Containers[i].Name == EphemeralRunnerContainerName &&
ephemeralRunner.Spec.Spec.Containers[i].WorkingDir != "" {
jitSettings.WorkFolder = ephemeralRunner.Spec.Spec.Containers[i].WorkingDir
}
}
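// The loop above propagates a custom working directory: when the runner
// container in the EphemeralRunner spec sets WorkingDir, that path is sent
// along in the JIT settings as the runner's work folder.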
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err != nil {
actionsError := &actions.ActionsError{}
@ -633,7 +642,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@ -658,7 +667,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
@ -815,12 +824,10 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
// TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Named("ephemeral-runner-controller").
Complete(r)
}

View File

@ -29,8 +29,8 @@ import (
)
const (
timeout = time.Second * 10
interval = time.Millisecond * 250
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
)
@ -133,9 +133,9 @@ var _ = Describe("EphemeralRunner", func() {
n := len(created.Finalizers) // avoid capacity mismatch
return created.Finalizers[:n:n], nil
},
timeout,
interval,
).Should(BeEquivalentTo([]string{ephemeralRunnerActionsFinalizerName, ephemeralRunnerFinalizerName}))
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo([]string{ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName}))
Eventually(
func() (bool, error) {
@ -147,8 +147,8 @@ var _ = Describe("EphemeralRunner", func() {
_, ok := secret.Data[jitTokenKey]
return ok, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@ -160,8 +160,8 @@ var _ = Describe("EphemeralRunner", func() {
return pod.Name, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(ephemeralRunner.Name))
})
@ -184,8 +184,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@ -203,7 +203,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
@ -247,8 +247,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
// create runner-linked secret
@ -273,8 +273,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
err = k8sClient.Delete(ctx, ephemeralRunner)
@ -289,8 +289,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@ -302,8 +302,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@ -315,8 +315,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@ -328,8 +328,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@ -341,8 +341,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@ -356,8 +356,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updatedEphemeralRunner.Status.RunnerId, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeNumerically(">", 0))
})
@ -371,8 +371,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
@ -395,8 +395,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(phase))
}
})
@ -411,8 +411,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodRunning
@ -427,7 +427,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(""))
})
@ -462,8 +462,8 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod has not yet failed 5 times")
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
// In case a pod still got created due to controller-runtime cache delay, mark the container as exited
@ -488,7 +488,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", err
}
return updated.Status.Reason, nil
}, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
// EphemeralRunner should not have any pod
Eventually(func() (bool, error) {
@ -497,7 +497,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, nil
}
return kerrors.IsNotFound(err), nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
})
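// The test above pins the retry budget: after five recorded pod failures the
// controller stops creating replacement pods and reports TooManyPodFailures.
// A minimal sketch of such a guard, assuming the failure count is tracked in
// Status.Failures (names illustrative, not the exact controller code):
//
//	if len(runner.Status.Failures) >= 5 {
//		// give up instead of re-creating the pod
//		runner.Status.Reason = "TooManyPodFailures"
//		return ctrl.Result{}, nil
//	}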
It("It should re-create pod on eviction", func() {
@ -510,8 +510,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodFailed
@ -530,7 +530,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@ -541,8 +541,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@ -555,8 +555,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
@ -577,7 +577,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@ -588,8 +588,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@ -602,8 +602,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
// first set phase to running
@ -627,8 +627,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodRunning))
// set phase to succeeded
@ -644,7 +644,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(corev1.PodRunning))
})
})
@ -700,7 +700,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return true, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: EphemeralRunnerContainerName,
@ -720,7 +720,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
})
})
@ -800,7 +800,7 @@ var _ = Describe("EphemeralRunner", func() {
return proxySuccessfulllyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@ -825,8 +825,8 @@ var _ = Describe("EphemeralRunner", func() {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner pod")
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
@ -958,7 +958,7 @@ var _ = Describe("EphemeralRunner", func() {
return serverSuccessfullyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeTrue(), "failed to contact server")
})
})

View File

@ -53,7 +53,7 @@ type EphemeralRunnerSetReconciler struct {
PublishMetrics bool
resourceBuilder resourceBuilder
ResourceBuilder
}
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
@ -213,9 +213,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
// on the next batch
case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas:
count := total - ephemeralRunnerSet.Spec.Replicas
if count <= 0 {
break
}
log.Info("Deleting ephemeral runners (scale down)", "count", count)
if err := r.deleteIdleEphemeralRunners(
ctx,
@ -363,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@ -574,28 +571,6 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
// grab the job object, extract the owner...
ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner)
owner := metav1.GetControllerOf(ephemeralRunner)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunnerSet{}).
Owns(&v1alpha1.EphemeralRunner{}).

View File

@ -23,15 +23,14 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/fake"
"github.com/actions/actions-runner-controller/github/actions/testserver"
)
const (
ephemeralRunnerSetTestTimeout = time.Second * 10
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
@ -40,7 +39,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet
var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet
var configSecret *corev1.Secret
BeforeEach(func() {
@ -57,13 +56,13 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{
ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.EphemeralRunnerSetSpec{
EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{
Spec: v1alpha1.EphemeralRunnerSetSpec{
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 100,
@ -90,7 +89,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Context("When creating a new EphemeralRunnerSet", func() {
It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() {
// Check if finalizer is added
created := new(actionsv1alpha1.EphemeralRunnerSet)
created := new(v1alpha1.EphemeralRunnerSet)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
@ -108,7 +107,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if the number of ephemeral runners stays at 0
Consistently(
func() (int, error) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -122,7 +121,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if the status stays at 0
Consistently(
func() (int, error) {
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
runnerSet := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet)
if err != nil {
return -1, err
@ -142,7 +141,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if the expected number of ephemeral runners is created
Eventually(
func() (int, error) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -176,7 +175,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if the status is updated
Eventually(
func() (int, error) {
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
runnerSet := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet)
if err != nil {
return -1, err
@ -191,7 +190,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Context("When deleting a new EphemeralRunnerSet", func() {
It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() {
created := new(actionsv1alpha1.EphemeralRunnerSet)
created := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -204,7 +203,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Wait for the EphemeralRunnerSet to be scaled up
Eventually(
func() (int, error) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -242,7 +241,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if all ephemeral runners are deleted
Eventually(
func() (int, error) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -256,7 +255,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Check if the EphemeralRunnerSet is deleted
Eventually(
func() error {
deleted := new(actionsv1alpha1.EphemeralRunnerSet)
deleted := new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted)
if err != nil {
if kerrors.IsNotFound(err) {
@ -275,7 +274,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Context("When a new EphemeralRunnerSet scale up and down", func() {
It("Should scale up with patch ID 0", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -286,7 +285,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -302,7 +301,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should scale up when patch ID changes", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -313,7 +312,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -327,7 +326,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(1), "1 EphemeralRunner should be created")
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -338,7 +337,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -354,7 +353,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should clean up finished ephemeral runner when scaling down", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -365,7 +364,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -390,7 +389,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Keep the ephemeral runner until the next patch
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -405,7 +404,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunners should exist (1 running, 1 finished)")
// The listener was slower to patch the completed runner, but we should still have 1 running
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -416,7 +415,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -432,7 +431,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should keep finished ephemeral runners until patch id changes", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -443,7 +442,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -468,7 +467,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// confirm they are not deleted
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -484,7 +483,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should handle double scale up", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -495,7 +494,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -520,7 +519,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -531,7 +530,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
// We should have 3 runners, and have no Succeeded ones
Eventually(
func() error {
@ -558,7 +557,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should handle scale down without removing pending runners", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -569,7 +568,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -594,7 +593,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Wait for these statuses to actually be updated
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -623,7 +622,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Succeeded phase")
// Scale down to 0, while 1 is still pending. This simulates the difference between the desired and actual state
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -634,7 +633,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
// We should have 1 runner up and pending
Eventually(
func() error {
@ -678,7 +677,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should kill pending and running runners if they are up for some reason and the batch contains no jobs", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -689,7 +688,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -715,7 +714,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Wait for these statuses to actually be updated
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -748,7 +747,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Scale down to 0 with patch ID 0. This forces the scale-down to self-correct on an empty batch
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -759,7 +758,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -786,7 +785,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Now, eventually, they should be deleted
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -795,7 +794,6 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
@ -803,7 +801,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should replace finished ephemeral runners with new ones", func() {
ers := new(actionsv1alpha1.EphemeralRunnerSet)
ers := new(v1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -814,7 +812,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -841,7 +839,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Wait for these statuses to actually be updated
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -874,7 +872,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Now, let's simulate replacement. The desired count is still 2.
// This simulates that we got 1 job assigned, and 1 job completed.
ers = new(actionsv1alpha1.EphemeralRunnerSet)
ers = new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -885,7 +883,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -911,7 +909,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
})
It("Should update status on Ephemeral Runner state changes", func() {
created := new(actionsv1alpha1.EphemeralRunnerSet)
created := new(v1alpha1.EphemeralRunnerSet)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
@ -926,7 +924,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err := k8sClient.Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (bool, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
@ -1036,7 +1034,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
runnerList = new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -1091,7 +1089,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet
var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet
var configSecret *corev1.Secret
BeforeEach(func() {
@ -1126,14 +1124,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err := k8sClient.Create(ctx, secretCredentials)
Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials")
ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{
ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.EphemeralRunnerSetSpec{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 1,
EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: "http://example.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 100,
@ -1193,7 +1191,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
).Should(Succeed(), "compiled / flattened proxy secret should exist")
Eventually(func(g Gomega) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners")
@ -1211,7 +1209,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
// Set pods to PodSucceeded to simulate an actual EphemeralRunner stopping
Eventually(
func(g Gomega) (int, error) {
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
@ -1293,14 +1291,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
proxy.Close()
})
ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{
ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.EphemeralRunnerSetSpec{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 1,
EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: "http://example.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
RunnerScaleSetId: 100,
@ -1327,7 +1325,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err = k8sClient.Create(ctx, ephemeralRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
@ -1346,7 +1344,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
runnerSet := new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
@ -1360,7 +1358,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
return proxySuccessfulllyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
})
@ -1369,7 +1367,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet
var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet
var configSecret *corev1.Secret
var rootCAConfigMap *corev1.ConfigMap
@ -1431,17 +1429,17 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
server.StartTLS()
ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{
ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
},
Spec: actionsv1alpha1.EphemeralRunnerSetSpec{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 1,
EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@ -1469,7 +1467,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
err = k8sClient.Create(ctx, ephemeralRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet")
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
@ -1491,7 +1489,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
currentRunnerSet := new(v1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")

View File

@ -18,6 +18,9 @@ const defaultGitHubToken = "gh_token"
func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) {
for _, mgr := range append([]manager.Manager{first}, others...) {
if err := SetupIndexers(mgr); err != nil {
t.Fatalf("failed to setup indexers: %v", err)
}
ctx, cancel := context.WithCancel(context.Background())
g, ctx := errgroup.WithContext(ctx)

View File

@ -0,0 +1,71 @@
package actionsgithubcom
import (
"context"
"slices"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func SetupIndexers(mgr ctrl.Manager) error {
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&corev1.Pod{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingListener", "EphemeralRunner"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&corev1.ServiceAccount{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingListener"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&v1alpha1.EphemeralRunnerSet{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("AutoscalingRunnerSet"),
); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(
context.Background(),
&v1alpha1.EphemeralRunner{},
resourceOwnerKey,
newGroupVersionOwnerKindIndexer("EphemeralRunnerSet"),
); err != nil {
return err
}
return nil
}
func newGroupVersionOwnerKindIndexer(ownerKind string, otherOwnerKinds ...string) client.IndexerFunc {
owners := append([]string{ownerKind}, otherOwnerKinds...)
return func(o client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOfNoCopy(o)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || !slices.Contains(owners, owner.Kind) {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
}
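A minimal usage sketch (not part of this change): once SetupIndexers has registered the resourceOwnerKey field index, a reconciler can ask the cached client for only the children of a given owner instead of filtering client-side. Variable names here are illustrative.

	// List only the EphemeralRunners controlled by this EphemeralRunnerSet,
	// using the resourceOwnerKey index registered in SetupIndexers.
	runnerList := new(v1alpha1.EphemeralRunnerList)
	err := k8sClient.List(ctx, runnerList,
		client.InNamespace(ephemeralRunnerSet.Namespace),
		client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name},
	)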

View File

@ -8,6 +8,7 @@ import (
"math"
"net"
"strconv"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
@ -68,9 +69,11 @@ func SetListenerEntrypoint(entrypoint string) {
}
}
type resourceBuilder struct{}
type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string
}
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
@ -85,13 +88,13 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}
labels := map[string]string{
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
}
})
annotations := map[string]string{
annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
@ -150,7 +153,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}
func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@ -213,7 +216,7 @@ func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@ -406,33 +409,33 @@ func mergeListenerContainer(base, from *corev1.Container) {
base.TTY = from.TTY
}
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
},
}),
},
}
}
func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
labelKeyListenerName: autoscalingListener.Name,
"role-policy-rules-hash": rulesHash,
},
}),
},
Rules: rules,
}
@ -440,7 +443,7 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
return newRole
}
func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
roleRef := rbacv1.RoleRef{
Kind: "Role",
Name: listenerRole.Name,
@ -460,14 +463,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
labelKeyListenerName: autoscalingListener.Name,
"role-binding-role-ref-hash": roleRefHash,
"role-binding-subject-hash": subjectHash,
},
}),
},
RoleRef: roleRef,
Subjects: subjects,
@ -476,18 +479,18 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}
func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)
newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
},
}),
},
Data: secret.DeepCopy().Data,
}
@ -495,27 +498,26 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
labels := map[string]string{
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
}
})
if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
annotationKeyRunnerSpecHash: runnerSpecHash,
@ -545,20 +547,16 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
return newEphemeralRunnerSet, nil
}
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for _, key := range commonLabelKeys {
switch key {
case LabelKeyKubernetesComponent:
labels[key] = "runner"
default:
v, ok := ephemeralRunnerSet.Labels[key]
if !ok {
continue
}
labels[key] = v
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
labels[k] = "runner"
} else {
labels[k] = v
}
}
annotations := make(map[string]string)
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
@ -576,7 +574,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}
func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
var newPod corev1.Pod
labels := map[string]string{}
@ -644,7 +642,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod
}
func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name,
@ -751,3 +749,29 @@ func trimLabelValue(val string) string {
}
return val
}
func (b *ResourceBuilder) mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := make(map[string]string, len(base))
base:
for k, v := range base {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue base
}
}
mergedLabels[k] = v
}
overwrite:
for k, v := range overwrite {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue overwrite
}
}
mergedLabels[k] = v
}
return mergedLabels
}
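A quick illustration of the exclusion semantics (hypothetical label values): keys matching any configured prefix are dropped from both maps, and overwrite entries win on key collisions.

	b := &ResourceBuilder{ExcludeLabelPropagationPrefixes: []string{"example.com/"}}
	merged := b.mergeLabels(
		map[string]string{"app": "arc", "example.com/team": "ci"}, // base
		map[string]string{"app": "listener"},                      // overwrite
	)
	// merged == map[string]string{"app": "listener"}:
	// "example.com/team" is filtered out and "app" is overwritten.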

View File

@ -21,6 +21,11 @@ func TestLabelPropagation(t *testing.T) {
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
"example.com/label": "example-value",
"example.com/example": "example-value",
"directly.excluded.org/label": "excluded-value",
"directly.excluded.org/arbitrary": "not-excluded-value",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
@ -33,7 +38,12 @@ func TestLabelPropagation(t *testing.T) {
},
}
var b resourceBuilder
b := ResourceBuilder{
ExcludeLabelPropagationPrefixes: []string{
"example.com/",
"directly.excluded.org/label",
},
}
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
@ -47,6 +57,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], ephemeralRunnerSet.Labels["arbitrary-label"])
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
@ -59,6 +70,12 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])
assert.NotContains(t, listener.Labels, "example.com/label")
assert.NotContains(t, listener.Labels, "example.com/example")
assert.NotContains(t, listener.Labels, "directly.excluded.org/label")
assert.Equal(t, "not-excluded-value", listener.Labels["directly.excluded.org/arbitrary"])
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
@ -125,7 +142,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
}
var b resourceBuilder
var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
@ -149,7 +166,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
}
var b resourceBuilder
var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)

View File

@ -42,7 +42,7 @@ eliminate some duplication:
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
in return. We should also start using `actions/setup-go@v4` to take advantage
in return. We should also start using `actions/setup-go@v5` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too
as those will be run elsewhere (either use `Short` there too or ignore the

View File

@ -43,6 +43,24 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
### v0.9.3
1. AutoscalingListener controller: Inspect listener container state instead of pod phase [#3548](https://github.com/actions/actions-runner-controller/pull/3548)
1. Exclude label prefix propagation [#3607](https://github.com/actions/actions-runner-controller/pull/3607)
1. Check status code of fetch access token for github app [#3568](https://github.com/actions/actions-runner-controller/pull/3568)
1. Remove .Named() from the ephemeral runner controller [#3596](https://github.com/actions/actions-runner-controller/pull/3596)
1. Customize work directory [#3477](https://github.com/actions/actions-runner-controller/pull/3477)
1. Fix problem with ephemeralRunner Succeeded state before build executed [#3528](https://github.com/actions/actions-runner-controller/pull/3528)
1. Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet [#3536](https://github.com/actions/actions-runner-controller/pull/3536)
### v0.9.2
1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)
1. Re-use the last desired patch on empty batch [#3453](https://github.com/actions/actions-runner-controller/pull/3453)
1. Extract single place to set up indexers [#3454](https://github.com/actions/actions-runner-controller/pull/3454)
1. Include controller version in logs [#3473](https://github.com/actions/actions-runner-controller/pull/3473)
1. Propagate arbitrary labels from runnersets to all created resources [#3157](https://github.com/actions/actions-runner-controller/pull/3157)
### v0.9.1
#### Major changes

View File

@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"strconv"
@ -1054,6 +1055,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
return nil, &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
}
}
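	// Note for callers (a sketch, not part of the original change): the
	// typed error above can be recovered with errors.As to inspect the
	// HTTP details, e.g.:
	//
	//	var apiErr *GitHubAPIError
	//	if errors.As(err, &apiErr) {
	//		// use apiErr.StatusCode and apiErr.RequestID
	//	}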
// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
var accessToken *accessToken
if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
@ -1131,15 +1140,30 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
}
retry++
if retry > 3 {
if retry > 5 {
return nil, fmt.Errorf("unable to register runner after 3 retries: %w", &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: innerErr,
})
}
time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
// Add exponential backoff + jitter to avoid thundering herd
// This generates a backoff schedule of 500ms * 2^retry plus up to
// 1s of jitter, capped at 20s:
// 1: 1s-2s
// 2: 2s-3s
// 3: 4s-5s
// 4: 8s-9s
// 5: 16s-17s
baseDelay := 500 * time.Millisecond
jitter := time.Duration(rand.Intn(1000)) * time.Millisecond
maxDelay := 20 * time.Second
delay := baseDelay*(1<<retry) + jitter
if delay > maxDelay {
delay = maxDelay
}
time.Sleep(delay)
}
var actionsServiceAdminConnection *ActionsServiceAdminConnection
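For reference, a self-contained sketch that prints the schedule the retry loop above produces (same constants; jitter omitted for determinism):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		baseDelay := 500 * time.Millisecond
		maxDelay := 20 * time.Second
		for retry := 1; retry <= 5; retry++ {
			delay := baseDelay * (1 << retry) // the real code adds up to 1s of jitter
			if delay > maxDelay {
				delay = maxDelay
			}
			fmt.Printf("retry %d: %s\n", retry, delay)
		}
	}
	// Output: retry 1: 1s, retry 2: 2s, ..., retry 5: 16s (before jitter)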

View File

@ -170,7 +170,7 @@ func TestNewActionsServiceRequest(t *testing.T) {
}
failures := 0
unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) {
if failures < 2 {
if failures < 5 {
failures++
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)

go.mod
View File

@ -1,6 +1,6 @@
module github.com/actions/actions-runner-controller
go 1.22.1
go 1.22.4
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
@ -14,20 +14,20 @@ require (
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.46.7
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.17.1
github.com/onsi/gomega v1.30.0
github.com/onsi/gomega v1.33.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.9.0
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.26.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.24.0
golang.org/x/oauth2 v0.15.0
golang.org/x/sync v0.6.0
golang.org/x/oauth2 v0.19.0
golang.org/x/sync v0.7.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.4
@ -91,7 +91,7 @@ require (
github.com/urfave/cli v1.22.2 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.4.0 // indirect

go.sum
View File

@ -35,6 +35,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@ -113,12 +115,12 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@ -145,8 +147,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
@ -173,8 +179,8 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -221,12 +227,12 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -255,16 +261,16 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -286,8 +292,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=

main.go
View File

@ -98,6 +98,7 @@ func main() {
logLevel string
logFormat string
watchSingleNamespace string
excludeLabelPropagationPrefixes stringSlice
autoScalerImagePullSecrets stringSlice
@ -138,6 +139,7 @@ func main() {
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information")
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.")
flag.Var(&excludeLabelPropagationPrefixes, "exclude-label-propagation-prefix", "The list of prefixes that should be excluded from label propagation")
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`)
flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.")
@ -239,6 +241,10 @@ func main() {
}
if autoScalingRunnerSetOnly {
if err := actionsgithubcom.SetupIndexers(mgr); err != nil {
log.Error(err, "unable to setup indexers")
os.Exit(1)
}
managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE")
if managerImage == "" {
log.Error(err, "unable to obtain listener image")
@ -254,15 +260,20 @@ func main() {
log.WithName("actions-clients"),
)
rb := actionsgithubcom.ResourceBuilder{
ExcludeLabelPropagationPrefixes: excludeLabelPropagationPrefixes,
}
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingRunnerSet"),
Log: log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ControllerNamespace: managerNamespace,
DefaultRunnerScaleSetListenerImage: managerImage,
ActionsClient: actionsMultiClient,
UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy),
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
os.Exit(1)
@ -270,9 +281,10 @@ func main() {
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner"),
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
os.Exit(1)
@ -280,10 +292,11 @@ func main() {
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet"),
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
PublishMetrics: metricsAddr != "0",
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
os.Exit(1)
@ -291,10 +304,11 @@ func main() {
if err = (&actionsgithubcom.AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingListener"),
Log: log.WithName("AutoscalingListener").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ListenerMetricsAddr: listenerMetricsAddr,
ListenerMetricsEndpoint: listenerMetricsEndpoint,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
os.Exit(1)
@ -441,7 +455,7 @@ func main() {
}
}
log.Info("starting manager")
log.Info("starting manager", "version", build.Version)
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
log.Error(err, "problem running manager")
os.Exit(1)
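Since -exclude-label-propagation-prefix is registered with flag.Var above, it may be passed multiple times, once per prefix. A minimal sketch of a repeatable flag.Value like stringSlice (the actual type in main.go may differ):

	// stringSlice collects repeated occurrences of a flag into a slice.
	type stringSlice []string

	func (s *stringSlice) String() string { return strings.Join(*s, ",") }

	func (s *stringSlice) Set(value string) error {
		*s = append(*s, value)
		return nil
	}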

View File

@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.315.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0
RUNNER_VERSION ?= 2.319.1
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.1
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built

View File

@ -1,2 +1,2 @@
RUNNER_VERSION=2.315.0
RUNNER_CONTAINER_HOOKS_VERSION=0.6.0
RUNNER_VERSION=2.319.1
RUNNER_CONTAINER_HOOKS_VERSION=0.6.1

View File

@ -36,8 +36,8 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.315.0"
RunnerContainerHooksVersion = "0.6.0"
RunnerVersion = "2.319.1"
RunnerContainerHooksVersion = "0.6.1"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@ -1106,7 +1106,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "actions/setup-go@v3",
With: &testing.With{
GoVersion: "1.22.1",
GoVersion: "1.22.4",
},
},
)
@ -1236,7 +1236,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.22.1",
Version: "v1.22.4",
},
},
testing.Step{

View File

@ -355,7 +355,7 @@ nodes:
image: %s
`, k.Name, image, image))
if err := os.WriteFile(f.Name(), kindConfig, 0644); err != nil {
if err := os.WriteFile(f.Name(), kindConfig, 0o644); err != nil {
return err
}
@ -385,7 +385,7 @@ func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
}
tmpDir := filepath.Join(wd, ".testing", k.Name)
if err := os.MkdirAll(tmpDir, 0755); err != nil {
if err := os.MkdirAll(tmpDir, 0o755); err != nil {
return err
}
defer func() {

View File

@ -1,7 +1,7 @@
package testing
const (
ActionsCheckout = "actions/checkout@v3"
ActionsCheckout = "actions/checkout@v4"
)
type Workflow struct {