Merge branch 'master' into dind-docker-iptables-legacy

commit 8cfe981496
Matt Rouhana, 2024-04-09 19:14:08 -04:00, committed by GitHub
60 changed files with 61,922 additions and 12,775 deletions

View File

@@ -78,7 +78,7 @@ jobs:
         run: |
           RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
           CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
           PR_NAME="Updates:"
           if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
           then
@@ -88,7 +88,7 @@ jobs:
           then
             PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
           fi
           result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
           if [ -z "$result" ]
           then
@@ -120,21 +120,25 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: New branch
         run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
       - name: Update files
         run: |
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
-          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
-
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
-          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
+          CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
+
+          CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
+          LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
+          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
      - name: Commit changes
        run: |

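A note on the hunk above: the old sed calls passed raw version strings such as 2.312.0 as patterns, and to sed each unescaped dot matches any character, so the pattern can over-match. The new ${VAR//./\\.} expansions escape the dots first. A minimal Go sketch of the same pitfall, for illustration only (not part of this commit; regexp.QuoteMeta stands in for the Bash escaping):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        version := "2.312.0"

        // Unescaped: "." matches any character, so the pattern over-matches.
        loose := regexp.MustCompile(version)
        fmt.Println(loose.MatchString("2a312b0")) // true -- accidental match

        // Escaped: QuoteMeta yields "2\.312\.0", like ${VAR//./\\.} in the workflow.
        strict := regexp.MustCompile(regexp.QuoteMeta(version))
        fmt.Println(strict.MatchString("2a312b0")) // false
        fmt.Println(strict.MatchString("2.312.0")) // true
    }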
View File

@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.8.1"
+  IMAGE_VERSION: "0.9.0"
 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests

View File

@@ -1,2 +1,2 @@
 # actions-runner-controller maintainers
-* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic
+* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass

View File

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.21.3 as builder
+FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
 
 WORKDIR /workspace

View File

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.312.0
+RUNNER_VERSION ?= 2.315.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -320,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
 	CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
 	cd $$CONTROLLER_GEN_TMP_DIR ;\
 	go mod init tmp ;\
-	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 ;\
+	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 ;\
 	rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
 	}
 endif

View File

@@ -42,6 +42,10 @@ type EphemeralRunner struct {
 	Status EphemeralRunnerStatus `json:"status,omitempty"`
 }
 
+func (er *EphemeralRunner) IsDone() bool {
+	return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
+}
+
 // EphemeralRunnerSpec defines the desired state of EphemeralRunner
 type EphemeralRunnerSpec struct {
 	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster

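The IsDone helper added above folds the two terminal pod phases into one predicate. A hedged sketch of a caller (the filterActive helper below is an illustrative assumption, not code from this commit; the import path is assumed to be the repository's v1alpha1 API package):

    package sketch

    import (
        v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    )

    // filterActive keeps only runners that are still in flight: IsDone is
    // true exactly when Status.Phase is PodSucceeded or PodFailed, so
    // Pending, Running, and the empty phase all count as active.
    func filterActive(runners []v1alpha1.EphemeralRunner) []v1alpha1.EphemeralRunner {
        var active []v1alpha1.EphemeralRunner
        for i := range runners {
            if runners[i].IsDone() {
                continue // terminal: eligible for cleanup
            }
            active = append(active, runners[i])
        }
        return active
    }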
View File

@@ -24,6 +24,8 @@ import (
 type EphemeralRunnerSetSpec struct {
 	// Replicas is the number of desired EphemeralRunner resources in the k8s namespace.
 	Replicas int `json:"replicas,omitempty"`
+	// PatchID is the unique identifier for the patch issued by the listener app
+	PatchID int `json:"patchID"`
 
 	EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
 }

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.13.0
+    controller-gen.kubebuilder.io/version: v0.14.0
   name: horizontalrunnerautoscalers.actions.summerwind.dev
 spec:
   group: actions.summerwind.dev
@@ -35,10 +35,19 @@ spec:
         description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
         properties:
           apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
             type: string
           kind:
-            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
             type: string
           metadata:
             type: object
@@ -47,7 +56,9 @@ spec:
             properties:
               capacityReservations:
                 items:
-                  description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
+                  description: |-
+                    CapacityReservation specifies the number of replicas temporarily added
+                    to the scale target until ExpirationTime.
                   properties:
                     effectiveTime:
                       format: date-time
@@ -79,30 +90,46 @@ spec:
                 items:
                   properties:
                     repositoryNames:
-                      description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
+                      description: |-
+                        RepositoryNames is the list of repository names to be used for calculating the metric.
+                        For example, a repository name is the REPO part of `github.com/USER/REPO`.
                       items:
                         type: string
                       type: array
                     scaleDownAdjustment:
-                      description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+                      description: |-
+                        ScaleDownAdjustment is the number of runners removed on scale-down.
+                        You can only specify either ScaleDownFactor or ScaleDownAdjustment.
                       type: integer
                     scaleDownFactor:
-                      description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
+                      description: |-
+                        ScaleDownFactor is the multiplicative factor applied to the current number of runners used
+                        to determine how many pods should be removed.
                       type: string
                     scaleDownThreshold:
-                      description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
+                      description: |-
+                        ScaleDownThreshold is the percentage of busy runners less than which will
+                        trigger the hpa to scale the runners down.
                       type: string
                     scaleUpAdjustment:
-                      description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+                      description: |-
+                        ScaleUpAdjustment is the number of runners added on scale-up.
+                        You can only specify either ScaleUpFactor or ScaleUpAdjustment.
                       type: integer
                     scaleUpFactor:
-                      description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
+                      description: |-
+                        ScaleUpFactor is the multiplicative factor applied to the current number of runners used
+                        to determine how many pods should be added.
                       type: string
                     scaleUpThreshold:
-                      description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
+                      description: |-
+                        ScaleUpThreshold is the percentage of busy runners greater than which will
+                        trigger the hpa to scale runners up.
                       type: string
                     type:
-                      description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
+                      description: |-
+                        Type is the type of metric to be used for autoscaling.
+                        It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
                       type: string
                   type: object
                 type: array
@@ -110,7 +137,9 @@ spec:
                 description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
                 type: integer
              scaleDownDelaySecondsAfterScaleOut:
-                description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
+                description: |-
+                  ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
+                  Used to prevent flapping (down->up->down->... loop)
                 type: integer
              scaleTargetRef:
                 description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -126,7 +155,18 @@ spec:
                     type: string
                 type: object
              scaleUpTriggers:
-                description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
+                description: |-
+                  ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
+                  on each webhook requested received by the webhookBasedAutoscaler.
+
+                  This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
+
+                  Note that the added runners remain until the next sync period at least,
+                  and they may or may not be used by GitHub Actions depending on the timing.
+                  They are intended to be used to gain "resource slack" immediately after you
+                  receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
                 items:
                   properties:
                     amount:
@@ -139,12 +179,18 @@ spec:
                       description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
                       properties:
                         names:
-                          description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
+                          description: |-
+                            Names is a list of GitHub Actions glob patterns.
+                            Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+                            Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+                            So it is very likely that you can utilize this to trigger depending on the job.
                           items:
                             type: string
                           type: array
                        repositories:
-                          description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
+                          description: |-
+                            Repositories is a list of GitHub repositories.
+                            Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
                           items:
                             type: string
                           type: array
@@ -169,7 +215,9 @@ spec:
                           type: array
                       type: object
                     push:
-                      description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+                      description: |-
+                        PushSpec is the condition for triggering scale-up on push event
+                        Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
                       type: object
                     workflowJob:
                       description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -178,23 +226,33 @@ spec:
                       type: object
                 type: array
              scheduledOverrides:
-                description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
+                description: |-
+                  ScheduledOverrides is the list of ScheduledOverride.
+                  It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+                  The earlier a scheduled override is, the higher it is prioritized.
                 items:
-                  description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+                  description: |-
+                    ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+                    A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
                   properties:
                     endTime:
                       description: EndTime is the time at which the first override ends.
                       format: date-time
                       type: string
                     minReplicas:
-                      description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
+                      description: |-
+                        MinReplicas is the number of runners while overriding.
+                        If omitted, it doesn't override minReplicas.
                       minimum: 0
                       nullable: true
                       type: integer
                     recurrenceRule:
                       properties:
                         frequency:
-                          description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
+                          description: |-
+                            Frequency is the name of a predefined interval of each recurrence.
+                            The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+                            If empty, the corresponding override happens only once.
                           enum:
                           - Daily
                           - Weekly
@@ -202,7 +260,9 @@ spec:
                           - Yearly
                           type: string
                         untilTime:
-                          description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
+                          description: |-
+                            UntilTime is the time of the final recurrence.
+                            If empty, the schedule recurs forever.
                           format: date-time
                           type: string
                       type: object
@@ -231,18 +291,24 @@ spec:
             type: object
             type: array
           desiredReplicas:
-            description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+            description: |-
+              DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+              This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
             type: integer
           lastSuccessfulScaleOutTime:
             format: date-time
             nullable: true
             type: string
           observedGeneration:
-            description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
+            description: |-
+              ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
+              RunnerDeployment's generation, which is updated on mutation by the API Server.
             format: int64
             type: integer
          scheduledOverridesSummary:
-            description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
+            description: |-
+              ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+              for observability.
             type: string
         type: object
     type: object

View File

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.1
+version: 0.9.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.8.1"
+appVersion: "0.9.0"
 
 home: https://github.com/actions/actions-runner-controller

View File

@@ -2,3 +2,4 @@ Thank you for installing {{ .Chart.Name }}.
 
 Your release is named {{ .Release.Name }}.
 
+WARNING: Older version of the listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.

View File

@@ -110,10 +110,16 @@ spec:
           volumeMounts:
           - mountPath: /tmp
             name: tmp
+          {{- range .Values.volumeMounts }}
+          - {{ toYaml . | nindent 10 }}
+          {{- end }}
       terminationGracePeriodSeconds: 10
       volumes:
       - name: tmp
         emptyDir: {}
+      {{- range .Values.volumes }}
+      - {{ toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}

View File

@@ -424,10 +424,14 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 			"tolerations[0].key": "foo",
 			"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key":      "foo",
 			"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
 			"priorityClassName":    "test-priority-class",
 			"flags.updateStrategy": "eventual",
 			"flags.logLevel":       "info",
 			"flags.logFormat":      "json",
+			"volumes[0].name":           "customMount",
+			"volumes[0].configMap.name": "my-configmap",
+			"volumeMounts[0].name":      "customMount",
+			"volumeMounts[0].mountPath": "/my/mount/path",
 		},
 		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
 	}
@@ -470,9 +474,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
 	assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
 	assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
 
-	assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
+	assert.Len(t, deployment.Spec.Template.Spec.Volumes, 2)
 	assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
-	assert.NotNil(t, 10, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
+	assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
+	assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Volumes[1].Name)
+	assert.Equal(t, "my-configmap", deployment.Spec.Template.Spec.Volumes[1].ConfigMap.Name)
 
 	assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
 	assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
@@ -521,9 +527,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
 	assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
 
-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 2)
 	assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
 	assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
+	assert.Equal(t, "customMount", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
+	assert.Equal(t, "/my/mount/path", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
 }
 
 func TestTemplate_EnableLeaderElectionRole(t *testing.T) {

View File

@@ -72,6 +72,10 @@ tolerations: []
 
 affinity: {}
 
+# Mount volumes in the container.
+volumes: []
+volumeMounts: []
+
 # Leverage a PriorityClass to ensure your pods survive resource shortages
 # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
 # PriorityClass: system-cluster-critical

View File

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.1
+version: 0.9.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.8.1"
+appVersion: "0.9.0"
 
 home: https://github.com/actions/actions-runner-controller

View File

@@ -99,7 +99,7 @@ volumeMounts:
   image: docker:dind
   args:
     - dockerd
-    - --host=unix:///run/docker/docker.sock
+    - --host=unix:///var/run/docker.sock
     - --group=$(DOCKER_GROUP_GID)
   env:
     - name: DOCKER_GROUP_GID
@@ -112,7 +112,7 @@ volumeMounts:
     - name: work
       mountPath: /home/runner/_work
     - name: dind-sock
-      mountPath: /run/docker
+      mountPath: /var/run
     - name: dind-externals
       mountPath: /home/runner/externals
 {{- end }}
@@ -225,7 +225,7 @@ env:
 {{- end }}
 {{- if $setDockerHost }}
   - name: DOCKER_HOST
-    value: unix:///run/docker/docker.sock
+    value: unix:///var/run/docker.sock
 {{- end }}
 {{- if $setRunnerWaitDocker }}
   - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
@@ -266,8 +266,7 @@ volumeMounts:
 {{- end }}
 {{- if $mountDindCert }}
   - name: dind-sock
-    mountPath: /run/docker
-    readOnly: true
+    mountPath: /var/run
 {{- end }}
 {{- if $mountGitHubServerTLS }}
   - name: github-server-tls-cert
@@ -528,13 +527,13 @@ volumeMounts:
 {{- end }}
 {{- end }}
 {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
-{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
-{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if gt $multiNamespacesCounter 1 }}
-{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- if eq $multiNamespacesCounter 1 }}
 {{- with $controllerDeployment.metadata }}
@@ -547,11 +546,11 @@ volumeMounts:
 {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
 {{- end }}
 {{- else }}
-{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- end }}
 {{- if eq $managerServiceAccountNamespace "" }}
-{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
+{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.namespace in values.yaml to be explicit if you think the discovery is wrong." }}
 {{- end }}
 {{- $managerServiceAccountNamespace }}
 {{- end }}

View File

@@ -13,6 +13,7 @@ metadata:
     app.kubernetes.io/component: "autoscaling-runner-set"
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   annotations:
+    actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
     {{- $containerMode := .Values.containerMode }}
     {{- if not (kindIs "string" .Values.githubConfigSecret) }}
     actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}

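The new values-hash annotation makes upgrades self-propagating: any change in the rendered .Values changes the digest, which changes the AutoscalingRunnerSet's metadata and lets the controller detect the update, and trunc 63 keeps the value within the familiar 63-character Kubernetes label-value limit, as the chart test in the next file asserts. A small Go sketch of the equivalent computation (an assumption that mirrors toJson .Values | sha256sum | trunc 63; note Go's JSON key ordering may differ from Helm's toJson, so the digests are not interchangeable):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "encoding/json"
        "fmt"
    )

    // valuesHash mimics the Helm pipeline: JSON-encode the values, hash the
    // bytes with SHA-256, hex-encode (64 chars), keep the first 63.
    func valuesHash(values map[string]any) (string, error) {
        raw, err := json.Marshal(values)
        if err != nil {
            return "", err
        }
        sum := sha256.Sum256(raw)
        return hex.EncodeToString(sum[:])[:63], nil
    }

    func main() {
        h, _ := valuesHash(map[string]any{"githubConfigUrl": "https://github.com/actions"})
        fmt.Println(h, len(h)) // prints the digest and 63
    }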
View File

@@ -900,7 +900,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 	assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
 	assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
 	assert.Equal(t, "DOCKER_HOST", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
-	assert.Equal(t, "unix:///run/docker/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
+	assert.Equal(t, "unix:///var/run/docker.sock", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
 	assert.Equal(t, "RUNNER_WAIT_FOR_DOCKER_IN_SECONDS", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "120", ars.Spec.Template.Spec.Containers[0].Env[1].Value)
@@ -910,8 +910,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 	assert.False(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[0].ReadOnly)
 	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
-	assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
-	assert.True(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].ReadOnly)
+	assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
 
 	assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
 	assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
@@ -921,7 +920,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
 	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
 	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
-	assert.Equal(t, "/run/docker", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
+	assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
 	assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
 	assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
@@ -2089,3 +2088,58 @@ func TestRunnerContainerVolumeNotEmptyMap(t *testing.T) {
 	_, ok := m.Spec.Template.Spec.Containers[0]["volumeMounts"]
 	assert.False(t, ok, "volumeMounts should not be set")
 }
+
+func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
+	t.Parallel()
+
+	const valuesHash = "actions.github.com/values-hash"
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		Logger: logger.Discard,
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+
+	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+	firstHash := autoscalingRunnerSet.Annotations["actions.github.com/values-hash"]
+	assert.NotEmpty(t, firstHash)
+	assert.LessOrEqual(t, len(firstHash), 63)
+
+	helmChartPath, err = filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	options = &helm.Options{
+		Logger: logger.Discard,
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token1234567890",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+	secondHash := autoscalingRunnerSet.Annotations[valuesHash]
+	assert.NotEmpty(t, secondHash)
+	assert.NotEqual(t, firstHash, secondHash)
+	assert.LessOrEqual(t, len(secondHash), 63)
+}

View File

@@ -125,18 +125,17 @@ template:
   ##     command: ["/home/runner/run.sh"]
   ##     env:
   ##       - name: DOCKER_HOST
-  ##         value: unix:///run/docker/docker.sock
+  ##         value: unix:///var/run/docker.sock
   ##     volumeMounts:
   ##       - name: work
   ##         mountPath: /home/runner/_work
   ##       - name: dind-sock
-  ##         mountPath: /run/docker
-  ##         readOnly: true
+  ##         mountPath: /var/run
   ##   - name: dind
   ##     image: docker:dind
   ##     args:
   ##       - dockerd
-  ##       - --host=unix:///run/docker/docker.sock
+  ##       - --host=unix:///var/run/docker.sock
   ##       - --group=$(DOCKER_GROUP_GID)
   ##     env:
   ##       - name: DOCKER_GROUP_GID
@@ -149,7 +148,7 @@ template:
   ##       - name: work
   ##         mountPath: /home/runner/_work
   ##       - name: dind-sock
-  ##         mountPath: /run/docker
+  ##         mountPath: /var/run
   ##       - name: dind-externals
   ##         mountPath: /home/runner/externals
   ## volumes:

View File

@@ -34,7 +34,7 @@ type Listener interface {
 //go:generate mockery --name Worker --output ./mocks --outpkg mocks --case underscore
 type Worker interface {
 	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
-	HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
+	HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error)
 }
 
 func New(config config.Config) (*App, error) {

View File

@@ -15,23 +15,23 @@ type Worker struct {
 	mock.Mock
 }
 
-// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
-func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
-	ret := _m.Called(ctx, count)
+// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
+func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
+	ret := _m.Called(ctx, count, acquireCount)
 
 	var r0 int
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
-		return rf(ctx, count)
+	if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
+		return rf(ctx, count, acquireCount)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
-		r0 = rf(ctx, count)
+	if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
+		r0 = rf(ctx, count, acquireCount)
 	} else {
 		r0 = ret.Get(0).(int)
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
-		r1 = rf(ctx, count)
+	if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
+		r1 = rf(ctx, count, acquireCount)
 	} else {
 		r1 = ret.Error(1)
 	}

View File

@@ -114,7 +114,7 @@ func New(config Config) (*Listener, error) {
 //go:generate mockery --name Handler --output ./mocks --outpkg mocks --case underscore
 type Handler interface {
 	HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error
-	HandleDesiredRunnerCount(ctx context.Context, count int) (int, error)
+	HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error)
 }
 
 // Listen listens for incoming messages and handles them using the provided handler.
@@ -145,7 +145,7 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
 	}
 	l.metrics.PublishStatistics(initialMessage.Statistics)
 
-	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs)
+	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, initialMessage.Statistics.TotalAssignedJobs, 0)
 	if err != nil {
 		return fmt.Errorf("handling initial message failed: %w", err)
 	}
@@ -207,7 +207,7 @@ func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
 		l.metrics.PublishJobStarted(jobStarted)
 	}
 
-	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs)
+	desiredRunners, err := handler.HandleDesiredRunnerCount(ctx, parsedMsg.statistics.TotalAssignedJobs, len(parsedMsg.jobsCompleted))
 	if err != nil {
 		return fmt.Errorf("failed to handle desired runner count: %w", err)
 	}
@@ -284,7 +284,6 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
 	}
 
 	return msg, nil
 }
-
 func (l *Listener) deleteLastMessage(ctx context.Context) error {
@@ -384,9 +383,9 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
 	l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
 
-	ids, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
+	idsAcquired, err := l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
 	if err == nil { // if NO errors
-		return ids, nil
+		return idsAcquired, nil
 	}
 
 	expiredError := &actions.MessageQueueTokenExpiredError{}
@@ -398,12 +397,12 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
 		return nil, err
 	}
 
-	ids, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
+	idsAcquired, err = l.client.AcquireJobs(ctx, l.scaleSetID, l.session.MessageQueueAccessToken, ids)
 	if err != nil {
 		return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
 	}
 
-	return ids, nil
+	return idsAcquired, nil
 }
 
 func (l *Listener) refreshSession(ctx context.Context) error {

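The renames in the last hunk keep the IDs requested apart from the IDs the service actually granted, while the retry path re-sends the original request after refreshing an expired session token. A condensed sketch of that shape (the jobClient interface and function below are simplified assumptions based on this diff, not the listener's verbatim API):

    package sketch

    import (
        "context"
        "errors"
        "fmt"

        "github.com/actions/actions-runner-controller/github/actions"
    )

    // jobClient is a simplified stand-in for the listener's actions client.
    type jobClient interface {
        AcquireJobs(ctx context.Context, ids []int64) ([]int64, error)
        RefreshSession(ctx context.Context) error
    }

    // acquireWithRefresh mirrors acquireAvailableJobs: try once, refresh the
    // session only on token expiry, then retry with the *same* requested IDs.
    // Keeping the acquired IDs in their own variable avoids clobbering the
    // request set, which is what the idsAcquired rename fixes.
    func acquireWithRefresh(ctx context.Context, c jobClient, requested []int64) ([]int64, error) {
        acquired, err := c.AcquireJobs(ctx, requested)
        if err == nil {
            return acquired, nil
        }

        expired := &actions.MessageQueueTokenExpiredError{}
        if !errors.As(err, &expired) {
            return nil, err // anything other than token expiry is fatal
        }
        if err := c.RefreshSession(ctx); err != nil {
            return nil, err
        }

        acquired, err = c.AcquireJobs(ctx, requested) // retry with the original IDs
        if err != nil {
            return nil, fmt.Errorf("failed to acquire jobs after session refresh: %w", err)
        }
        return acquired, nil
    }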
View File

@@ -37,7 +37,6 @@ func TestNew(t *testing.T) {
 		assert.Nil(t, err)
 		assert.NotNil(t, l)
 	})
-
 }
 
 func TestListener_createSession(t *testing.T) {
@@ -428,7 +427,7 @@ func TestListener_Listen(t *testing.T) {
 		var called bool
 		handler := listenermocks.NewHandler(t)
-		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
+		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
 			Return(0, nil).
 			Run(
 				func(mock.Arguments) {
@@ -486,11 +485,11 @@ func TestListener_Listen(t *testing.T) {
 		config.Client = client
 
 		handler := listenermocks.NewHandler(t)
-		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
+		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
 			Return(0, nil).
 			Once()
 
-		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).
+		handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 0).
 			Return(0, nil).
 			Once()
@@ -628,9 +627,6 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
 		}
 		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once()
 
-		// First call to AcquireJobs will fail with a token expired error
-		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
-
 		// Second call to AcquireJobs will succeed
 		want := []int64{1, 2, 3}
 		availableJobs := []*actions.JobAvailable{
@@ -650,7 +646,24 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
 			},
 		}
-		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once()
+
+		// First call to AcquireJobs will fail with a token expired error
+		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
+			Run(func(args mock.Arguments) {
+				ids := args.Get(3).([]int64)
+				assert.Equal(t, want, ids)
+			}).
+			Return(nil, &actions.MessageQueueTokenExpiredError{}).
+			Once()
+
+		// Second call should succeed
+		client.On("AcquireJobs", ctx, mock.Anything, mock.Anything, mock.Anything).
+			Run(func(args mock.Arguments) {
+				ids := args.Get(3).([]int64)
+				assert.Equal(t, want, ids)
+			}).
+			Return(want, nil).
+			Once()
 
 		config.Client = client

View File

@@ -86,7 +86,7 @@ func TestInitialMetrics(t *testing.T) {
 	config.Client = client
 
 	handler := listenermocks.NewHandler(t)
-	handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs).
+	handler.On("HandleDesiredRunnerCount", mock.Anything, sessionStatistics.TotalAssignedJobs, 0).
 		Return(sessionStatistics.TotalAssignedJobs, nil).
 		Once()
@@ -178,7 +178,7 @@ func TestHandleMessageMetrics(t *testing.T) {
 	handler := listenermocks.NewHandler(t)
 	handler.On("HandleJobStarted", mock.Anything, jobsStarted[0]).Return(nil).Once()
-	handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything).Return(desiredResult, nil).Once()
+	handler.On("HandleDesiredRunnerCount", mock.Anything, mock.Anything, 2).Return(desiredResult, nil).Once()
 
 	client := listenermocks.NewClient(t)
 	client.On("DeleteMessage", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

View File

@@ -15,23 +15,23 @@ type Handler struct {
 	mock.Mock
 }
 
-// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count
-func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
-	ret := _m.Called(ctx, count)
+// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
+func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
+	ret := _m.Called(ctx, count, jobsCompleted)
 
 	var r0 int
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, int) (int, error)); ok {
-		return rf(ctx, count)
+	if rf, ok := ret.Get(0).(func(context.Context, int, int) (int, error)); ok {
+		return rf(ctx, count, jobsCompleted)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, int) int); ok {
-		r0 = rf(ctx, count)
+	if rf, ok := ret.Get(0).(func(context.Context, int, int) int); ok {
+		r0 = rf(ctx, count, jobsCompleted)
 	} else {
 		r0 = ret.Get(0).(int)
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
-		r1 = rf(ctx, count)
+	if rf, ok := ret.Get(1).(func(context.Context, int, int) error); ok {
+		r1 = rf(ctx, count, jobsCompleted)
 	} else {
 		r1 = ret.Error(1)
 	}

View File

@@ -223,8 +223,8 @@ type baseLabels struct {
 func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
 	return prometheus.Labels{
 		labelKeyEnterprise:     b.enterprise,
-		labelKeyOrganization:   b.organization,
-		labelKeyRepository:     b.repository,
+		labelKeyOrganization:   jobBase.OwnerName,
+		labelKeyRepository:     jobBase.RepositoryName,
 		labelKeyJobName:        jobBase.JobDisplayName,
 		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
 		labelKeyEventName:      jobBase.EventName,
@@ -271,8 +271,10 @@ type ServerPublisher interface {
 	ListenAndServe(ctx context.Context) error
 }
 
-var _ Publisher = &discard{}
-var _ ServerPublisher = &exporter{}
+var (
+	_ Publisher       = &discard{}
+	_ ServerPublisher = &exporter{}
+)
 
 var Discard Publisher = &discard{}

View File

@@ -38,18 +38,20 @@ type Config struct {
 // The Worker's role is to process the messages it receives from the listener.
 // It then initiates Kubernetes API requests to carry out the necessary actions.
 type Worker struct {
-	clientset *kubernetes.Clientset
-	config    Config
-	lastPatch int
-	logger    *logr.Logger
+	clientset   *kubernetes.Clientset
+	config      Config
+	lastPatch   int
+	lastPatchID int
+	logger      *logr.Logger
 }
 
 var _ listener.Handler = (*Worker)(nil)
 
 func New(config Config, options ...Option) (*Worker, error) {
 	w := &Worker{
-		config:    config,
-		lastPatch: -1,
+		config:      config,
+		lastPatch:   -1,
+		lastPatchID: -1,
 	}
 
 	conf, err := rest.InClusterConfig()
@@ -161,7 +163,7 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
 // The function then scales the ephemeral runner set by applying the merge patch.
 // Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
 // If any error occurs during the process, it returns an error with a descriptive message.
-func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int) (int, error) {
+func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
 	// Max runners should always be set by the resource builder either to the configured value,
 	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
 	targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
@@ -172,17 +174,22 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
 		"min", w.config.MinRunners,
 		"max", w.config.MaxRunners,
 		"currentRunnerCount", w.lastPatch,
+		"jobsCompleted", jobsCompleted,
 	}
 
-	if targetRunnerCount == w.lastPatch {
-		w.logger.Info("Skipping patching of EphemeralRunnerSet as the desired count has not changed", logValues...)
+	if w.lastPatch == targetRunnerCount && jobsCompleted == 0 {
+		w.logger.Info("Skipping patch", logValues...)
 		return targetRunnerCount, nil
 	}
 
+	w.lastPatchID++
+	w.lastPatch = targetRunnerCount
+
 	original, err := json.Marshal(
 		&v1alpha1.EphemeralRunnerSet{
 			Spec: v1alpha1.EphemeralRunnerSetSpec{
 				Replicas: -1,
+				PatchID:  -1,
 			},
 		},
 	)
@@ -194,6 +201,7 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
 		&v1alpha1.EphemeralRunnerSet{
 			Spec: v1alpha1.EphemeralRunnerSetSpec{
 				Replicas: targetRunnerCount,
+				PatchID:  w.lastPatchID,
 			},
 		},
 	)

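Two behaviors land in this file: the worker now patches even when the target count is unchanged if jobsCompleted > 0, because finished ephemeral runners must be replaced, and each patch carries a monotonically increasing PatchID so the EphemeralRunnerSet controller can tell a new scale decision from a re-applied one. The sentinel -1 values in the "original" object ensure both fields always survive into the computed patch. A trimmed sketch of that construction (condensed from the hunk above; the Kubernetes client call that applies the patch is elided):

    package sketch

    import (
        "encoding/json"

        v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    )

    // buildScalePatch shows, in isolation, how HandleDesiredRunnerCount derives
    // its merge patch: an "original" object carrying sentinel -1 values and a
    // "patched" object carrying the real ones, so the computed patch always
    // contains both replicas and patchID, even when a value repeats.
    func buildScalePatch(replicas, patchID int) (original, patched []byte, err error) {
        original, err = json.Marshal(&v1alpha1.EphemeralRunnerSet{
            Spec: v1alpha1.EphemeralRunnerSetSpec{
                Replicas: -1, // sentinel: never equals a real target
                PatchID:  -1,
            },
        })
        if err != nil {
            return nil, nil, err
        }

        patched, err = json.Marshal(&v1alpha1.EphemeralRunnerSet{
            Spec: v1alpha1.EphemeralRunnerSetSpec{
                Replicas: replicas,
                PatchID:  patchID,
            },
        })
        return original, patched, err
    }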
File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.13.0
+    controller-gen.kubebuilder.io/version: v0.14.0
   name: horizontalrunnerautoscalers.actions.summerwind.dev
 spec:
   group: actions.summerwind.dev
@@ -35,10 +35,19 @@
 description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
 properties:
   apiVersion:
-    description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+    description: |-
+      APIVersion defines the versioned schema of this representation of an object.
+      Servers should convert recognized schemas to the latest internal value, and
+      may reject unrecognized values.
+      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
     type: string
   kind:
-    description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+    description: |-
+      Kind is a string value representing the REST resource this object represents.
+      Servers may infer this from the endpoint the client submits requests to.
+      Cannot be updated.
+      In CamelCase.
+      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
     type: string
   metadata:
     type: object
@@ -47,7 +56,9 @@
 properties:
   capacityReservations:
     items:
-      description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
+      description: |-
+        CapacityReservation specifies the number of replicas temporarily added
+        to the scale target until ExpirationTime.
       properties:
         effectiveTime:
           format: date-time
@@ -79,30 +90,46 @@
 items:
   properties:
     repositoryNames:
-      description: RepositoryNames is the list of repository names to be used for calculating the metric. For example, a repository name is the REPO part of `github.com/USER/REPO`.
+      description: |-
+        RepositoryNames is the list of repository names to be used for calculating the metric.
+        For example, a repository name is the REPO part of `github.com/USER/REPO`.
       items:
         type: string
       type: array
     scaleDownAdjustment:
-      description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+      description: |-
+        ScaleDownAdjustment is the number of runners removed on scale-down.
+        You can only specify either ScaleDownFactor or ScaleDownAdjustment.
       type: integer
     scaleDownFactor:
-      description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be removed.
+      description: |-
+        ScaleDownFactor is the multiplicative factor applied to the current number of runners used
+        to determine how many pods should be removed.
       type: string
     scaleDownThreshold:
-      description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
+      description: |-
+        ScaleDownThreshold is the percentage of busy runners less than which will
+        trigger the hpa to scale the runners down.
       type: string
     scaleUpAdjustment:
-      description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+      description: |-
+        ScaleUpAdjustment is the number of runners added on scale-up.
+        You can only specify either ScaleUpFactor or ScaleUpAdjustment.
       type: integer
     scaleUpFactor:
-      description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many pods should be added.
+      description: |-
+        ScaleUpFactor is the multiplicative factor applied to the current number of runners used
+        to determine how many pods should be added.
      type: string
    scaleUpThreshold:
-      description: ScaleUpThreshold is the percentage of busy runners greater than which will trigger the hpa to scale runners up.
+      description: |-
+        ScaleUpThreshold is the percentage of busy runners greater than which will
+        trigger the hpa to scale runners up.
      type: string
    type:
-      description: Type is the type of metric to be used for autoscaling. It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
+      description: |-
+        Type is the type of metric to be used for autoscaling.
+        It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
      type: string
  type: object
 type: array
@@ -110,7 +137,9 @@
   description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
   type: integer
 scaleDownDelaySecondsAfterScaleOut:
-  description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
+  description: |-
+    ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
+    Used to prevent flapping (down->up->down->... loop)
   type: integer
 scaleTargetRef:
   description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
@@ -126,7 +155,18 @@
     type: string
   type: object
 scaleUpTriggers:
-  description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
+  description: |-
+    ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
+    on each webhook requested received by the webhookBasedAutoscaler.
+
+    This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
+
+    Note that the added runners remain until the next sync period at least,
+    and they may or may not be used by GitHub Actions depending on the timing.
+    They are intended to be used to gain "resource slack" immediately after you
+    receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
   items:
     properties:
       amount:
@@ -139,12 +179,18 @@
 description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
 properties:
   names:
-    description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
+    description: |-
+      Names is a list of GitHub Actions glob patterns.
+      Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+      Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+      So it is very likely that you can utilize this to trigger depending on the job.
     items:
       type: string
     type: array
   repositories:
-    description: Repositories is a list of GitHub repositories. Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
+    description: |-
+      Repositories is a list of GitHub repositories.
+      Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
    items:
      type: string
    type: array
@@ -169,7 +215,9 @@
     type: array
   type: object
 push:
-  description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+  description: |-
+    PushSpec is the condition for triggering scale-up on push event
+    Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
   type: object
 workflowJob:
   description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
@@ -178,23 +226,33 @@
   type: object
 type: array
 scheduledOverrides:
-  description: ScheduledOverrides is the list of ScheduledOverride. It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. The earlier a scheduled override is, the higher it is prioritized.
+  description: |-
+    ScheduledOverrides is the list of ScheduledOverride.
+    It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+    The earlier a scheduled override is, the higher it is prioritized.
   items:
-    description: ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+    description: |-
+      ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+      A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
     properties:
       endTime:
         description: EndTime is the time at which the first override ends.
         format: date-time
         type: string
       minReplicas:
-        description: MinReplicas is the number of runners while overriding. If omitted, it doesn't override minReplicas.
+        description: |-
+          MinReplicas is the number of runners while overriding.
+          If omitted, it doesn't override minReplicas.
        minimum: 0
        nullable: true
        type: integer
      recurrenceRule:
        properties:
          frequency:
-            description: Frequency is the name of a predefined interval of each recurrence. The valid values are "Daily", "Weekly", "Monthly", and "Yearly". If empty, the corresponding override happens only once.
+            description: |-
+              Frequency is the name of a predefined interval of each recurrence.
+              The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+              If empty, the corresponding override happens only once.
            enum:
            - Daily
            - Weekly
@@ -202,7 +260,9 @@
 - Yearly
 type: string
 untilTime:
-  description: UntilTime is the time of the final recurrence. If empty, the schedule recurs forever.
+  description: |-
+    UntilTime is the time of the final recurrence.
+    If empty, the schedule recurs forever.
   format: date-time
   type: string
 type: object
@@ -231,18 +291,24 @@
   type: object
 type: array
 desiredReplicas:
-  description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
+  description: |-
+    DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+    This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
   type: integer
 lastSuccessfulScaleOutTime:
   format: date-time
   nullable: true
   type: string
 observedGeneration:
-  description: ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. RunnerDeployment's generation, which is updated on mutation by the API Server.
+  description: |-
+    ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
+    RunnerDeployment's generation, which is updated on mutation by the API Server.
   format: int64
   type: integer
 scheduledOverridesSummary:
-  description: ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output for observability.
+  description: |-
+    ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+    for observability.
   type: string
 type: object
 type: object

[4 file diffs suppressed because they are too large]

@@ -42,10 +42,14 @@ import (
 )

 const (
-	labelKeyRunnerSpecHash = "runner-spec-hash"
+	annotationKeyRunnerSpecHash = "actions.github.com/runner-spec-hash"
+	// annotationKeyValuesHash is hash of the entire values json.
+	// This is used to determine if the values have changed, so we can
+	// re-create listener.
+	annotationKeyValuesHash = "actions.github.com/values-hash"

 	autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
 	runnerScaleSetIdAnnotationKey     = "runner-scale-set-id"
-	runnerScaleSetNameAnnotationKey   = "runner-scale-set-name"
 )

 type UpdateStrategy string
@@ -205,7 +209,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 	}

 	// Make sure the runner scale set name is up to date
-	currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[runnerScaleSetNameAnnotationKey]
+	currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
 	if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
 		log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
 		return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
@@ -231,9 +235,8 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 		return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
 	}

-	desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
 	for _, runnerSet := range existingRunnerSets.all() {
-		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
+		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Annotations[annotationKeyRunnerSpecHash])
 	}

 	// Make sure the AutoscalingListener is up and running in the controller namespace
@@ -250,7 +253,9 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 	}

 	// Our listener pod is out of date, so we need to delete it to get a new recreate.
-	if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) {
+	listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
+	listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
+	if listenerFound && (listenerValuesHashChanged || listenerSpecHashChanged) {
 		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
 		if err := r.Delete(ctx, listener); err != nil {
 			if kerrors.IsNotFound(err) {
@@ -264,7 +269,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 		return ctrl.Result{}, nil
 	}

-	if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
+	if latestRunnerSet.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.RunnerSetSpecHash() {
 		if r.drainingJobs(&latestRunnerSet.Status) {
 			log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
 			log.Info("Scaling down the number of desired replicas to 0")
@@ -480,7 +485,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 	logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")

 	if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-		obj.Annotations[runnerScaleSetNameAnnotationKey] = runnerScaleSet.Name
+		obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
 		obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
 		obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
 		if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
@@ -528,9 +533,10 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
 		return ctrl.Result{}, err
 	}

-	logger.Info("Updating runner scale set runner group name as an annotation")
+	logger.Info("Updating runner scale set name and runner group name as annotations")
 	if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
 		obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = updatedRunnerScaleSet.RunnerGroupName
+		obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
 	}); err != nil {
 		logger.Error(err, "Failed to update runner group name annotation")
 		return ctrl.Result{}, err
@@ -566,7 +572,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 	logger.Info("Updating runner scale set name as an annotation")
 	if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
-		obj.Annotations[runnerScaleSetNameAnnotationKey] = updatedRunnerScaleSet.Name
+		obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = updatedRunnerScaleSet.Name
 	}); err != nil {
 		logger.Error(err, "Failed to update runner scale set name annotation")
 		return ctrl.Result{}, err
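
Listener recreation is now keyed on two annotations rather than a single label: the listener spec hash and a hash of the chart values. A short sketch of one plausible way to produce such a values hash, assuming sha256 over canonical JSON; the chart's actual hashing may differ:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// valuesHash returns a deterministic digest of an arbitrary values object.
// json.Marshal sorts map keys, so equal values always hash equally.
func valuesHash(values any) (string, error) {
	raw, err := json.Marshal(values)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(raw)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	old, _ := valuesHash(map[string]any{"minRunners": 1, "maxRunners": 5})
	cur, _ := valuesHash(map[string]any{"minRunners": 1, "maxRunners": 10})
	// A drifted hash is exactly what makes the reconciler above delete and
	// recreate the listener pod.
	fmt.Println("recreate listener:", old != cur)
}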


@@ -280,6 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			// This should trigger re-creation of EphemeralRunnerSet and Listener
 			patched := autoscalingRunnerSet.DeepCopy()
 			patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
+			if patched.ObjectMeta.Annotations == nil {
+				patched.ObjectMeta.Annotations = make(map[string]string)
+			}
+			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
 			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
 			autoscalingRunnerSet = patched.DeepCopy()
@@ -297,10 +301,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 						return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
 					}

-					return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
+					return runnerSetList.Items[0].Annotations[annotationKeyRunnerSpecHash], nil
 				},
 				autoscalingRunnerSetTestTimeout,
-				autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
+				autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Annotations[annotationKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

 			// We should create a new listener
 			Eventually(
@@ -334,6 +338,55 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")

+			// We should not re-create a new EphemeralRunnerSet
+			Consistently(
+				func() (string, error) {
+					runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
+					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+					if err != nil {
+						return "", err
+					}
+					if len(runnerSetList.Items) != 1 {
+						return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
+					}
+					return string(runnerSetList.Items[0].UID), nil
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created")
+
+			// We should only re-create a new listener
+			Eventually(
+				func() (string, error) {
+					listener := new(v1alpha1.AutoscalingListener)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+					if err != nil {
+						return "", err
+					}
+					return string(listener.UID), nil
+				},
+				autoscalingRunnerSetTestTimeout,
+				autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created")
+
+			// Only update the values hash for the autoscaling runner set
+			// This should trigger re-creation of the Listener only
+			runnerSetList = new(v1alpha1.EphemeralRunnerSetList)
+			err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
+			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
+			Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
+			runnerSet = runnerSetList.Items[0]
+
+			listener = new(v1alpha1.AutoscalingListener)
+			err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
+			Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
+
+			patched = autoscalingRunnerSet.DeepCopy()
+			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
+			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
+			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
+
 			// We should not re-create a new EphemeralRunnerSet
 			Consistently(
 				func() (string, error) {
@@ -493,6 +546,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			// Patch the AutoScalingRunnerSet image which should trigger
 			// the recreation of the Listener and EphemeralRunnerSet
 			patched := autoscalingRunnerSet.DeepCopy()
+			if patched.ObjectMeta.Annotations == nil {
+				patched.ObjectMeta.Annotations = make(map[string]string)
+			}
+			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
 			patched.Spec.Template.Spec = corev1.PodSpec{
 				Containers: []corev1.Container{
 					{
@@ -501,7 +558,6 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 					},
 				},
 			}
-			// patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
 			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
 			autoscalingRunnerSet = patched.DeepCopy()
@@ -698,7 +754,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
 						return "", err
 					}

-					if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
+					if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
 						return val, nil
 					}
@@ -722,7 +778,7 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
 						return "", err
 					}

-					if val, ok := ars.Annotations[runnerScaleSetNameAnnotationKey]; ok {
+					if val, ok := ars.Annotations[AnnotationKeyGitHubRunnerScaleSetName]; ok {
 						return val, nil
 					}


@@ -39,7 +39,11 @@ const (
 // Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
 const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"

-const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
+const (
+	AnnotationKeyGitHubRunnerGroupName    = "actions.github.com/runner-group-name"
+	AnnotationKeyGitHubRunnerScaleSetName = "actions.github.com/runner-scale-set-name"
+	AnnotationKeyPatchID                  = "actions.github.com/patch-id"
+)

 // Labels applied to listener roles
 const (
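
The new actions.github.com/patch-id annotation is how a patch ID travels with an EphemeralRunner. A toy round trip, with a plain map standing in for object metadata; the reconciler's real read appears in newEphemeralRunnerState further below:

package main

import (
	"fmt"
	"strconv"
)

const annotationKeyPatchID = "actions.github.com/patch-id"

func main() {
	annotations := map[string]string{annotationKeyPatchID: "7"}

	// Mirrors the reconciler's read: a missing or malformed value fails
	// strconv.Atoi and is simply ignored.
	patchID, err := strconv.Atoi(annotations[annotationKeyPatchID])
	if err != nil {
		patchID = 0
	}
	fmt.Println("patch ID:", patchID)
}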


@@ -133,6 +133,23 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		return ctrl.Result{}, nil
 	}

+	if ephemeralRunner.IsDone() {
+		log.Info("Cleaning up resources after ephemeral runner termination", "phase", ephemeralRunner.Status.Phase)
+		done, err := r.cleanupResources(ctx, ephemeralRunner, log)
+		if err != nil {
+			log.Error(err, "Failed to clean up ephemeral runner owned resources")
+			return ctrl.Result{}, err
+		}
+		if !done {
+			log.Info("Waiting for ephemeral runner owned resources to be deleted")
+			return ctrl.Result{Requeue: true}, nil
+		}
+		// Stop reconciling on this object.
+		// The EphemeralRunnerSet is responsible for cleaning it up.
+		log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
+		return ctrl.Result{}, nil
+	}
+
 	if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
 		log.Info("Adding runner registration finalizer")
 		err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -159,13 +176,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		return ctrl.Result{}, nil
 	}

-	if ephemeralRunner.Status.Phase == corev1.PodSucceeded || ephemeralRunner.Status.Phase == corev1.PodFailed {
-		// Stop reconciling on this object.
-		// The EphemeralRunnerSet is responsible for cleaning it up.
-		log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
-		return ctrl.Result{}, nil
-	}
-
 	if ephemeralRunner.Status.RunnerId == 0 {
 		log.Info("Creating new ephemeral runner registration and updating status with runner config")
 		return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
@@ -324,7 +334,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
+	case !kerrors.IsNotFound(err):
 		return false, err
 	}
 	log.Info("Pod is deleted")
@@ -341,7 +351,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
+	case !kerrors.IsNotFound(err):
 		return false, err
 	}
 	log.Info("Secret is deleted")
@@ -675,7 +685,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
 		return nil
 	}

-	log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message)
+	log.Info("Updating ephemeral runner status with pod phase", "statusPhase", pod.Status.Phase, "statusReason", pod.Status.Reason, "statusMessage", pod.Status.Message)
 	err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
 		obj.Status.Phase = pod.Status.Phase
 		obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
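
The hunk above funnels both terminal pod phases through ephemeralRunner.IsDone(), whose body is not part of this diff. A sketch of what such a helper presumably looks like, with the API type reduced to a stub; the real method lives on the v1alpha1 type and may cover more cases:

package v1alpha1

import corev1 "k8s.io/api/core/v1"

// EphemeralRunnerStatus is stubbed down to the single field the check needs.
type EphemeralRunnerStatus struct {
	Phase corev1.PodPhase
}

type EphemeralRunner struct {
	Status EphemeralRunnerStatus
}

// IsDone reports whether the runner reached a terminal phase. This mirrors
// the inline PodSucceeded/PodFailed check the diff removes.
func (r *EphemeralRunner) IsDone() bool {
	return r.Status.Phase == corev1.PodSucceeded || r.Status.Phase == corev1.PodFailed
}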


@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"net/http"
 	"sort"
+	"strconv"
 	"strings"

 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -156,14 +157,14 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
 		return ctrl.Result{}, err
 	}

-	pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)
+	ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)

 	log.Info("Ephemeral runner counts",
-		"pending", len(pendingEphemeralRunners),
-		"running", len(runningEphemeralRunners),
-		"finished", len(finishedEphemeralRunners),
-		"failed", len(failedEphemeralRunners),
-		"deleting", len(deletingEphemeralRunners),
+		"pending", len(ephemeralRunnerState.pending),
+		"running", len(ephemeralRunnerState.running),
+		"finished", len(ephemeralRunnerState.finished),
+		"failed", len(ephemeralRunnerState.failed),
+		"deleting", len(ephemeralRunnerState.deleting),
 	)

 	if r.PublishMetrics {
@@ -183,54 +184,52 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
 				Organization: parsedURL.Organization,
 				Enterprise:   parsedURL.Enterprise,
 			},
-			len(pendingEphemeralRunners),
-			len(runningEphemeralRunners),
-			len(failedEphemeralRunners),
+			len(ephemeralRunnerState.pending),
+			len(ephemeralRunnerState.running),
+			len(ephemeralRunnerState.failed),
 		)
 	}

-	// cleanup finished runners and proceed
-	var errs []error
-	for i := range finishedEphemeralRunners {
-		log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
-		if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
-			if !kerrors.IsNotFound(err) {
-				errs = append(errs, err)
-			}
-		}
-	}
-
-	if len(errs) > 0 {
-		mergedErrs := multierr.Combine(errs...)
-		log.Error(mergedErrs, "Failed to delete finished ephemeral runners")
-		return ctrl.Result{}, mergedErrs
-	}
-
-	total := len(pendingEphemeralRunners) + len(runningEphemeralRunners) + len(failedEphemeralRunners)
-	log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
-	switch {
-	case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
-		count := ephemeralRunnerSet.Spec.Replicas - total
-		log.Info("Creating new ephemeral runners (scale up)", "count", count)
-		if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
-			log.Error(err, "failed to make ephemeral runner")
-			return ctrl.Result{}, err
-		}
-
-	case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
-		count := total - ephemeralRunnerSet.Spec.Replicas
-		log.Info("Deleting ephemeral runners (scale down)", "count", count)
-		if err := r.deleteIdleEphemeralRunners(ctx, ephemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners, count, log); err != nil {
-			log.Error(err, "failed to delete idle runners")
-			return ctrl.Result{}, err
+	total := ephemeralRunnerState.scaleTotal()
+	if ephemeralRunnerSet.Spec.PatchID == 0 || ephemeralRunnerSet.Spec.PatchID != ephemeralRunnerState.latestPatchID {
+		defer func() {
+			if err := r.cleanupFinishedEphemeralRunners(ctx, ephemeralRunnerState.finished, log); err != nil {
+				log.Error(err, "failed to cleanup finished ephemeral runners")
+			}
+		}()
+
+		log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
+		switch {
+		case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
+			count := ephemeralRunnerSet.Spec.Replicas - total
+			log.Info("Creating new ephemeral runners (scale up)", "count", count)
+			if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
+				log.Error(err, "failed to make ephemeral runner")
+				return ctrl.Result{}, err
+			}
+
+		case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
+			count := total - ephemeralRunnerSet.Spec.Replicas
+			log.Info("Deleting ephemeral runners (scale down)", "count", count)
+			if err := r.deleteIdleEphemeralRunners(
+				ctx,
+				ephemeralRunnerSet,
+				ephemeralRunnerState.pending,
+				ephemeralRunnerState.running,
+				count,
+				log,
+			); err != nil {
+				log.Error(err, "failed to delete idle runners")
+				return ctrl.Result{}, err
+			}
 		}
 	}

 	desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
 		CurrentReplicas:         total,
-		PendingEphemeralRunners: len(pendingEphemeralRunners),
-		RunningEphemeralRunners: len(runningEphemeralRunners),
-		FailedEphemeralRunners:  len(failedEphemeralRunners),
+		PendingEphemeralRunners: len(ephemeralRunnerState.pending),
+		RunningEphemeralRunners: len(ephemeralRunnerState.running),
+		FailedEphemeralRunners:  len(ephemeralRunnerState.failed),
 	}

 	// Update the status if needed.
@@ -247,6 +246,21 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
 	return ctrl.Result{}, nil
 }

+func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
+	// cleanup finished runners and proceed
+	var errs []error
+	for i := range finishedEphemeralRunners {
+		log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
+		if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
+			if !kerrors.IsNotFound(err) {
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	return multierr.Combine(errs...)
+}
+
 func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
 	if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
 		return nil
@@ -284,19 +298,19 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
 		return true, nil
 	}

-	pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)
+	ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)

 	log.Info("Clean up runner counts",
-		"pending", len(pendingEphemeralRunners),
-		"running", len(runningEphemeralRunners),
-		"finished", len(finishedEphemeralRunners),
-		"failed", len(failedEphemeralRunners),
-		"deleting", len(deletingEphemeralRunners),
+		"pending", len(ephemeralRunnerState.pending),
+		"running", len(ephemeralRunnerState.running),
+		"finished", len(ephemeralRunnerState.finished),
+		"failed", len(ephemeralRunnerState.failed),
+		"deleting", len(ephemeralRunnerState.deleting),
 	)

 	log.Info("Cleanup finished or failed ephemeral runners")
 	var errs []error
-	for _, ephemeralRunner := range append(finishedEphemeralRunners, failedEphemeralRunners...) {
+	for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.failed...) {
 		log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
 		if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
 			errs = append(errs, err)
@@ -310,7 +324,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
 	}

 	// avoid fetching the client if we have nothing left to do
-	if len(runningEphemeralRunners) == 0 && len(pendingEphemeralRunners) == 0 {
+	if len(ephemeralRunnerState.running) == 0 && len(ephemeralRunnerState.pending) == 0 {
 		return false, nil
 	}
@@ -321,7 +335,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
 	log.Info("Cleanup pending or running ephemeral runners")
 	errs = errs[0:0]
-	for _, ephemeralRunner := range append(pendingEphemeralRunners, runningEphemeralRunners...) {
+	for _, ephemeralRunner := range append(ephemeralRunnerState.pending, ephemeralRunnerState.running...) {
 		log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name)
 		_, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log)
 		if err != nil {
@@ -427,12 +441,13 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
 	deletedCount := 0
 	for runners.next() {
 		ephemeralRunner := runners.object()
-		if ephemeralRunner.Status.RunnerId == 0 {
+		isDone := ephemeralRunner.IsDone()
+		if !isDone && ephemeralRunner.Status.RunnerId == 0 {
 			log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name)
 			continue
 		}

-		if ephemeralRunner.Status.JobRequestId > 0 {
+		if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
 			log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
 			continue
 		}
@@ -580,16 +595,22 @@ type ephemeralRunnerStepper struct {
 	index int
 }

-func newEphemeralRunnerStepper(pending, running []*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
-	sort.Slice(pending, func(i, j int) bool {
-		return pending[i].GetCreationTimestamp().Time.Before(pending[j].GetCreationTimestamp().Time)
-	})
-	sort.Slice(running, func(i, j int) bool {
-		return running[i].GetCreationTimestamp().Time.Before(running[j].GetCreationTimestamp().Time)
-	})
+func newEphemeralRunnerStepper(primary []*v1alpha1.EphemeralRunner, othersOrdered ...[]*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
+	sort.Slice(primary, func(i, j int) bool {
+		return primary[i].GetCreationTimestamp().Time.Before(primary[j].GetCreationTimestamp().Time)
+	})
+	for _, bucket := range othersOrdered {
+		sort.Slice(bucket, func(i, j int) bool {
+			return bucket[i].GetCreationTimestamp().Time.Before(bucket[j].GetCreationTimestamp().Time)
+		})
+	}
+
+	for _, bucket := range othersOrdered {
+		primary = append(primary, bucket...)
+	}

 	return &ephemeralRunnerStepper{
-		items: append(pending, running...),
+		items: primary,
 		index: -1,
 	}
 }
@@ -613,28 +634,48 @@ func (s *ephemeralRunnerStepper) len() int {
 	return len(s.items)
 }

-func categorizeEphemeralRunners(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) (pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners []*v1alpha1.EphemeralRunner) {
+type ephemeralRunnerState struct {
+	pending  []*v1alpha1.EphemeralRunner
+	running  []*v1alpha1.EphemeralRunner
+	finished []*v1alpha1.EphemeralRunner
+	failed   []*v1alpha1.EphemeralRunner
+	deleting []*v1alpha1.EphemeralRunner
+
+	latestPatchID int
+}
+
+func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) *ephemeralRunnerState {
+	var ephemeralRunnerState ephemeralRunnerState
 	for i := range ephemeralRunnerList.Items {
 		r := &ephemeralRunnerList.Items[i]
+		patchID, err := strconv.Atoi(r.Annotations[AnnotationKeyPatchID])
+		if err == nil && patchID > ephemeralRunnerState.latestPatchID {
+			ephemeralRunnerState.latestPatchID = patchID
+		}
 		if !r.ObjectMeta.DeletionTimestamp.IsZero() {
-			deletingEphemeralRunners = append(deletingEphemeralRunners, r)
+			ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
 			continue
 		}

 		switch r.Status.Phase {
 		case corev1.PodRunning:
-			runningEphemeralRunners = append(runningEphemeralRunners, r)
+			ephemeralRunnerState.running = append(ephemeralRunnerState.running, r)
 		case corev1.PodSucceeded:
-			finishedEphemeralRunners = append(finishedEphemeralRunners, r)
+			ephemeralRunnerState.finished = append(ephemeralRunnerState.finished, r)
 		case corev1.PodFailed:
-			failedEphemeralRunners = append(failedEphemeralRunners, r)
+			ephemeralRunnerState.failed = append(ephemeralRunnerState.failed, r)
 		default:
 			// Pending or no phase should be considered as pending.
 			//
 			// If field is not set, that means that the EphemeralRunner
 			// did not yet have chance to update the Status.Phase field.
-			pendingEphemeralRunners = append(pendingEphemeralRunners, r)
+			ephemeralRunnerState.pending = append(ephemeralRunnerState.pending, r)
 		}
 	}
-	return
+	return &ephemeralRunnerState
+}
+
+func (s *ephemeralRunnerState) scaleTotal() int {
+	return len(s.pending) + len(s.running) + len(s.failed)
 }
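
The reworked stepper takes a primary bucket plus any number of lower-priority buckets, sorts each by creation timestamp, and concatenates them, so scale-down visits pending runners oldest-first before touching running ones. A stripped-down, runnable sketch of that ordering with plain structs in place of the CRD types:

package main

import (
	"fmt"
	"sort"
	"time"
)

type runner struct {
	name    string
	created time.Time
}

// orderBuckets sorts each bucket by creation time, then concatenates them:
// everything in an earlier bucket is visited before any later bucket.
func orderBuckets(primary []runner, othersOrdered ...[]runner) []runner {
	sort.Slice(primary, func(i, j int) bool { return primary[i].created.Before(primary[j].created) })
	for _, bucket := range othersOrdered {
		sort.Slice(bucket, func(i, j int) bool { return bucket[i].created.Before(bucket[j].created) })
	}
	for _, bucket := range othersOrdered {
		primary = append(primary, bucket...)
	}
	return primary
}

func main() {
	t := time.Now()
	pending := []runner{{"pending-2", t.Add(2 * time.Minute)}, {"pending-1", t}}
	running := []runner{{"running-1", t.Add(time.Minute)}}
	for _, r := range orderBuckets(pending, running) {
		fmt.Println(r.name) // pending-1, pending-2, running-1
	}
}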


@ -4,6 +4,7 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -274,14 +275,17 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}) })
Context("When a new EphemeralRunnerSet scale up and down", func() { Context("When a new EphemeralRunnerSet scale up and down", func() {
It("It should delete finished EphemeralRunner and create new EphemeralRunner", func() { It("Should scale only on patch ID change", func() {
created := new(actionsv1alpha1.EphemeralRunnerSet) created := new(actionsv1alpha1.EphemeralRunnerSet)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
patchID := 1
// Scale up the EphemeralRunnerSet // Scale up the EphemeralRunnerSet
updated := created.DeepCopy() updated := created.DeepCopy()
updated.Spec.Replicas = 5 updated.Spec.Replicas = 5
updated.Spec.PatchID = patchID
err = k8sClient.Update(ctx, updated) err = k8sClient.Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
@ -317,7 +321,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil return len(runnerList.Items), nil
}, },
ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
// Mark one of the EphemeralRunner as finished // Mark one of the EphemeralRunner as finished
finishedRunner := runnerList.Items[4].DeepCopy() finishedRunner := runnerList.Items[4].DeepCopy()
@ -325,7 +330,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4])) err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
// Wait for the finished EphemeralRunner to be deleted // Wait for the finished EphemeralRunner to be set to succeeded
Eventually( Eventually(
func() error { func() error {
runnerList := new(actionsv1alpha1.EphemeralRunnerList) runnerList := new(actionsv1alpha1.EphemeralRunnerList)
@ -335,17 +340,35 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
} }
for _, runner := range runnerList.Items { for _, runner := range runnerList.Items {
if runner.Name == finishedRunner.Name { if runner.Name != finishedRunner.Name {
return fmt.Errorf("EphemeralRunner is not deleted") continue
} }
if runner.Status.Phase != corev1.PodSucceeded {
return fmt.Errorf("EphemeralRunner is not finished")
}
// found pod succeeded
return nil
} }
return nil return errors.New("Finished ephemeral runner is not found")
}, },
ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval).Should(Succeed(), "Finished EphemeralRunner should be deleted") ephemeralRunnerSetTestInterval,
).Should(Succeed(), "Finished EphemeralRunner should be deleted")
// We should still have the EphemeralRunnerSet scale up // After one ephemeral runner is finished, simulate job done patch
patchID++
original := new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
updated.Spec.Replicas = 4
err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Only finished ephemeral runner should be deleted
runnerList = new(actionsv1alpha1.EphemeralRunnerList) runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually( Eventually(
func() (int, error) { func() (int, error) {
@ -354,35 +377,27 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return -1, err return -1, err
} }
// Set status to simulate a configured EphemeralRunner for _, runner := range runnerList.Items {
refetch := false if runner.Status.Phase == corev1.PodSucceeded {
for i, runner := range runnerList.Items { return -1, fmt.Errorf("Finished EphemeralRunner should be deleted")
if runner.Status.RunnerId == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.RunnerId = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
refetch = true
}
}
if refetch {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
} }
} }
return len(runnerList.Items), nil return len(runnerList.Items), nil
}, },
ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(4), "4 EphemeralRunner should be created")
// Scale down the EphemeralRunnerSet // Scaling down the EphemeralRunnerSet
updated = created.DeepCopy() patchID++
original = new(actionsv1alpha1.EphemeralRunnerSet)
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
updated = original.DeepCopy()
updated.Spec.PatchID = patchID
updated.Spec.Replicas = 3 updated.Spec.Replicas = 3
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created)) err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down // Wait for the EphemeralRunnerSet to be scaled down
@ -417,7 +432,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil return len(runnerList.Items), nil
}, },
ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestTimeout,
-ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created")
+ephemeralRunnerSetTestInterval,
+).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created")
// We will not scale down runner that is running jobs
runningRunner := runnerList.Items[0].DeepCopy()
@ -430,10 +446,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
-// Scale down to 1
+// Scale down to 1 while 2 are running
-updated = created.DeepCopy()
+patchID++
+original = new(actionsv1alpha1.EphemeralRunnerSet)
+err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
+Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
+updated = original.DeepCopy()
+updated.Spec.PatchID = patchID
updated.Spec.Replicas = 1
-err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
+err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down to 2 since we still have 2 runner running jobs
@ -468,7 +489,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
-ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
+ephemeralRunnerSetTestInterval,
+).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
// We will not scale down failed runner
failedRunner := runnerList.Items[0].DeepCopy()
@ -476,15 +498,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Status().Patch(ctx, failedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
-// Scale down to 0
-updated = created.DeepCopy()
-updated.Spec.Replicas = 0
-err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
-Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
-// We should not scale down the EphemeralRunnerSet since we still have 1 runner running job and 1 failed runner
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
-Consistently(
+Eventually(
func() (int, error) {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
@ -514,7 +529,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
-ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
+ephemeralRunnerSetTestInterval,
+).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
// We will scale down to 0 when the running job is completed and the failed runner is deleted
runningRunner = runnerList.Items[1].DeepCopy()
@ -525,6 +541,17 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Delete(ctx, &runnerList.Items[0])
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunner")
+// Scale down to 0 while 1 ephemeral runner is failed
+patchID++
+original = new(actionsv1alpha1.EphemeralRunnerSet)
+err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original)
+Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
+updated = original.DeepCopy()
+updated.Spec.PatchID = patchID
+updated.Spec.Replicas = 0
+err = k8sClient.Patch(ctx, updated, client.MergeFrom(original))
+Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
// Wait for the EphemeralRunnerSet to be scaled down to 0
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
@ -557,7 +584,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
-ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
+ephemeralRunnerSetTestInterval,
+).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created")
})
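// The Get → DeepCopy → mutate → Patch(client.MergeFrom(original)) sequence used
// throughout this test is the idiomatic controller-runtime way to send a merge
// patch without clobbering concurrent updates. A minimal sketch of that flow,
// with the helper name scaleTo and its parameters assumed for illustration:
func scaleTo(ctx context.Context, k8sClient client.Client, key client.ObjectKey, replicas, patchID int) error {
	original := new(actionsv1alpha1.EphemeralRunnerSet)
	if err := k8sClient.Get(ctx, key, original); err != nil {
		return err
	}
	updated := original.DeepCopy()
	updated.Spec.PatchID = patchID // lets the controller tell this request apart from stale ones
	updated.Spec.Replicas = replicas
	// MergeFrom computes the patch of updated relative to original, so only
	// the fields changed here are sent to the API server.
	return k8sClient.Patch(ctx, updated, client.MergeFrom(original))
}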
It("Should update status on Ephemeral Runner state changes", func() { It("Should update status on Ephemeral Runner state changes", func() {


@ -91,7 +91,11 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
-labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
+}
+annotations := map[string]string{
+annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
+annotationKeyValuesHash: autoscalingRunnerSet.Annotations[annotationKeyValuesHash],
}
if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
@ -100,9 +104,10 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
autoscalingListener := &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: namespace,
Labels: labels,
+Annotations: annotations,
},
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
@ -498,7 +503,6 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
labels := map[string]string{
-labelKeyRunnerSpecHash: runnerSpecHash,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
@ -511,7 +515,10 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
+AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
+annotationKeyRunnerSpecHash: runnerSpecHash,
}
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
@ -556,6 +563,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}
+annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
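// The spec hash moves from labels to annotations here because Kubernetes caps
// label values at 63 characters, while annotation values have no such per-value
// limit, so longer hashes (and the chart values hash) fit safely. A rough
// sketch of the staleness check this enables; listenerOutOfDate is a
// hypothetical helper name, not a function from this codebase:
func listenerOutOfDate(listener *v1alpha1.AutoscalingListener, ars *v1alpha1.AutoscalingRunnerSet) bool {
	return listener.Annotations[annotationKeyRunnerSpecHash] != ars.ListenerSpecHash() ||
		listener.Annotations[annotationKeyValuesHash] != ars.Annotations[annotationKeyValuesHash]
}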


@ -23,8 +23,9 @@ func TestLabelPropagation(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
+AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
@ -38,20 +39,21 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
-assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
+assert.NotEmpty(t, ephemeralRunnerSet.Annotations[annotationKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
assert.Equal(t, "org", ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization])
assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName])
+assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
-assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
+assert.NotEmpty(t, ephemeralRunnerSet.Annotations[annotationKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
@ -83,6 +85,7 @@ func TestLabelPropagation(t *testing.T) {
}
assert.Equal(t, "runner", ephemeralRunner.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunner.Annotations[AnnotationKeyGitHubRunnerGroupName])
+assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])
runnerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
@ -109,8 +112,8 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
+AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
},
}


@ -43,6 +43,37 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog

### v0.9.0

#### ⚠️ Warning

- This release contains CRD changes. During the upgrade, please remove the old CRDs before re-installing the new version. For more information, please read the [Upgrading ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#upgrading-arc) guide.
- This release changes the [default docker socket path](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#upgrading-arc) exported for container mode `dind` (now `/var/run/docker.sock`).
- The older listener (`githubrunnerscalesetlistener`) is deprecated and will be removed in the upcoming `0.10.0` release.

Please evaluate these changes carefully before upgrading.

#### Major changes

1. Change docker socket path to /var/run/docker.sock [#3337](https://github.com/actions/actions-runner-controller/pull/3337)
1. Update metrics to include repository on job-based label [#3310](https://github.com/actions/actions-runner-controller/pull/3310)
1. Bump Go version to 1.22.1 [#3290](https://github.com/actions/actions-runner-controller/pull/3290)
1. Propagate runner scale set name annotation to EphemeralRunner [#3098](https://github.com/actions/actions-runner-controller/pull/3098)
1. Add annotation with values hash to re-create listener [#3195](https://github.com/actions/actions-runner-controller/pull/3195)
1. Fix overscaling when the controller is much faster than the listener (sketched below) [#3371](https://github.com/actions/actions-runner-controller/pull/3371)
1. Add retry on 401 and 403 for runner-registration [#3377](https://github.com/actions/actions-runner-controller/pull/3377)
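The overscaling fix relies on the `PatchID` stamped onto each `EphemeralRunner` (see `newEphemeralRunner` in this diff): runners created for an older scale decision can be recognized and discounted. A minimal sketch of that guard, with hypothetical variable names rather than the controller's literal code:

```go
// Count only runners stamped with the current patch ID; runners carrying an
// older ID belong to a previous scale decision and must not inflate the total.
current := 0
for _, r := range runnerList.Items {
	if id, err := strconv.Atoi(r.Annotations[AnnotationKeyPatchID]); err == nil && id == set.Spec.PatchID {
		current++
	}
}
if missing := set.Spec.Replicas - current; missing > 0 {
	// create `missing` new runners, each annotated with the current PatchID
}
```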
### v0.8.3
1. Expose volumeMounts and volumes in gha-runner-scale-set-controller [#3260](https://github.com/actions/actions-runner-controller/pull/3260)
1. Refer to the correct variable in discovery error message [#3296](https://github.com/actions/actions-runner-controller/pull/3296)
1. Fix acquire jobs after session refresh ghalistener [#3307](https://github.com/actions/actions-runner-controller/pull/3307)
### v0.8.2
1. Add listener graceful termination period and background context after the message is received [#3187](https://github.com/actions/actions-runner-controller/pull/3187)
1. Publish metrics in the new ghalistener [#3193](https://github.com/actions/actions-runner-controller/pull/3193)
1. Delete message session when listener.Listen returns [#3240](https://github.com/actions/actions-runner-controller/pull/3240)
### v0.8.1

1. Fix proxy issue in new listener client [#3181](https://github.com/actions/actions-runner-controller/pull/3181)


@ -12,4 +12,4 @@ We do not intend to provide a supported ARC dashboard. This is simply a referenc
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
-3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json.json) into Grafana.
+3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.


@ -975,20 +975,38 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
c.logger.Info("getting Actions tenant URL and JWT", "registrationURL", req.URL.String())
-resp, err := c.Do(req)
+var resp *http.Response
-if err != nil {
+retry := 0
-return nil, err
+for {
-}
+var err error
-defer resp.Body.Close()
+resp, err = c.Do(req)
+if err != nil {
+return nil, err
+}
+defer resp.Body.Close()
-if resp.StatusCode < 200 || resp.StatusCode > 299 {
+if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
-registrationErr := fmt.Errorf("unexpected response from Actions service during registration call: %v", resp.StatusCode)
+break
+}
+errStr := fmt.Sprintf("unexpected response from Actions service during registration call: %v", resp.StatusCode)
body, err := io.ReadAll(resp.Body)
if err != nil {
-return nil, fmt.Errorf("%v - %v", registrationErr, err)
+err = fmt.Errorf("%s - %w", errStr, err)
+} else {
+err = fmt.Errorf("%s - %v", errStr, string(body))
}
-return nil, fmt.Errorf("%v - %v", registrationErr, string(body))
+if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
+return nil, err
+}
+retry++
+if retry > 3 {
+return nil, fmt.Errorf("unable to register runner after 3 retries: %v", err)
+}
+time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
}
var actionsServiceAdminConnection *ActionsServiceAdminConnection
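// The loop above retries the registration call only on 401/403 responses, at
// most three times, sleeping 500ms * (retry + 1) between attempts (1s, 1.5s,
// 2s). The same schedule as a standalone sketch; retryOnAuthFailure is a
// hypothetical helper, and non-2xx handling is simplified relative to the
// real code above:
func retryOnAuthFailure(do func() (*http.Response, error)) (*http.Response, error) {
	retry := 0
	for {
		resp, err := do()
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
			return resp, nil // 2xx, or an error status that is not worth retrying
		}
		resp.Body.Close()
		retry++
		if retry > 3 {
			return nil, fmt.Errorf("unable to register runner after 3 retries")
		}
		time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
	}
}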


@ -2,6 +2,7 @@ package actions_test
import (
"context"
+"encoding/json"
"io"
"net/http"
"net/url"
@ -152,6 +153,43 @@ func TestNewActionsServiceRequest(t *testing.T) {
assert.Equal(t, client.ActionsServiceAdminTokenExpiresAt, expiresAt)
})
+t.Run("admin token refresh retry", func(t *testing.T) {
+newToken := defaultActionsToken(t)
+errMessage := `{"message":"test"}`
+srv := "http://github.com/my-org"
+resp := &actions.ActionsServiceAdminConnection{
+AdminToken: &newToken,
+ActionsServiceUrl: &srv,
+}
+failures := 0
+unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) {
+if failures < 2 {
+failures++
+w.Header().Set("Content-Type", "application/json")
+w.WriteHeader(http.StatusUnauthorized)
+w.Write([]byte(errMessage))
+return
+}
+w.WriteHeader(http.StatusCreated)
+_ = json.NewEncoder(w).Encode(resp)
+}
+server := testserver.New(t, nil, testserver.WithActionsToken("random-token"), testserver.WithActionsToken(newToken), testserver.WithActionsRegistrationTokenHandler(unauthorizedHandler))
+client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds)
+require.NoError(t, err)
+expiringToken := "expiring-token"
+expiresAt := time.Now().Add(59 * time.Second)
+client.ActionsServiceAdminToken = expiringToken
+client.ActionsServiceAdminTokenExpiresAt = expiresAt
+_, err = client.NewActionsServiceRequest(ctx, http.MethodGet, "my-path", nil)
+require.NoError(t, err)
+assert.Equal(t, client.ActionsServiceAdminToken, newToken)
+assert.Equal(t, client.ActionsServiceURL, srv)
+assert.NotEqual(t, client.ActionsServiceAdminTokenExpiresAt, expiresAt)
+})
t.Run("token is currently valid", func(t *testing.T) {
tokenThatShouldNotBeFetched := defaultActionsToken(t)
server := testserver.New(t, nil, testserver.WithActionsToken(tokenThatShouldNotBeFetched))

go.mod

@ -1,23 +1,23 @@
module github.com/actions/actions-runner-controller
-go 1.21.3
+go 1.22.1
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
github.com/davecgh/go-spew v1.1.1
-github.com/evanphx/json-patch v5.7.0+incompatible
+github.com/evanphx/json-patch v5.9.0+incompatible
-github.com/go-logr/logr v1.3.0
+github.com/go-logr/logr v1.4.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/go-cmp v0.6.0
github.com/google/go-github/v52 v52.0.0
-github.com/google/uuid v1.4.0
+github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.46.7
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
-github.com/onsi/ginkgo/v2 v2.13.1
+github.com/onsi/ginkgo/v2 v2.17.1
github.com/onsi/gomega v1.30.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
@ -25,9 +25,9 @@ require (
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.26.0
-golang.org/x/net v0.19.0
+golang.org/x/net v0.24.0
golang.org/x/oauth2 v0.15.0
-golang.org/x/sync v0.5.0
+golang.org/x/sync v0.6.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.4
@ -43,7 +43,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
-github.com/cloudflare/circl v1.3.6 // indirect
+github.com/cloudflare/circl v1.3.7 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
@ -89,15 +89,15 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.1 // indirect
github.com/urfave/cli v1.22.2 // indirect
-golang.org/x/crypto v0.16.0 // indirect
+golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-golang.org/x/sys v0.15.0 // indirect
+golang.org/x/sys v0.19.0 // indirect
-golang.org/x/term v0.15.0 // indirect
+golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.4.0 // indirect
-golang.org/x/tools v0.15.0 // indirect
+golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
-google.golang.org/protobuf v1.31.0 // indirect
+google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.28.3 // indirect

go.sum

@ -19,8 +19,8 @@ github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
-github.com/cloudflare/circl v1.3.6 h1:/xbKIqSHbZXHwkhbrhrt2YOHIwYJlXH94E3tI/gDlUg=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
-github.com/cloudflare/circl v1.3.6/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@ -30,8 +30,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
@ -44,8 +44,9 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@ -99,8 +100,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a h1:fEBsGL/sjAuJrgah5XqmmYsTLzJp/TO9Lhy39gkverk=
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@ -168,8 +169,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
-github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
@ -233,16 +234,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -255,8 +254,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -265,8 +264,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -288,15 +287,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@ -315,8 +314,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -333,8 +332,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=


@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
-RUNNER_VERSION ?= 2.312.0
+RUNNER_VERSION ?= 2.315.0
-RUNNER_CONTAINER_HOOKS_VERSION ?= 0.5.0
+RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built


@ -1,2 +1,2 @@
-RUNNER_VERSION=2.312.0
+RUNNER_VERSION=2.315.0
-RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
+RUNNER_CONTAINER_HOOKS_VERSION=0.6.0


@ -36,8 +36,8 @@ var (
testResultCMNamePrefix = "test-result-"
-RunnerVersion = "2.312.0"
+RunnerVersion = "2.315.0"
-RunnerContainerHooksVersion = "0.5.0"
+RunnerContainerHooksVersion = "0.6.0"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@ -93,7 +93,7 @@ func TestE2E(t *testing.T) {
os.Getenv("UBUNTU_VERSION"),
)
-var testedVersions = []struct {
+testedVersions := []struct {
label string
controller, controllerVer string
chart, chartVer string
@ -154,9 +154,7 @@ func TestE2E(t *testing.T) {
t.Skip("RunnerSets test has been skipped due to ARC_E2E_SKIP_RUNNERSETS")
}
-var (
+var testID string
-testID string
-)
t.Run("get or generate test ID", func(t *testing.T) {
testID = env.GetOrGenerateTestID(t)
@ -268,9 +266,7 @@ func TestE2E(t *testing.T) {
t.Skip("RunnerSets test has been skipped due to ARC_E2E_SKIP_RUNNERSETS")
}
-var (
+var testID string
-testID string
-)
t.Run("get or generate test ID", func(t *testing.T) {
testID = env.GetOrGenerateTestID(t)
@ -1110,7 +1106,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "actions/setup-go@v3",
With: &testing.With{
-GoVersion: "1.21.3",
+GoVersion: "1.22.1",
},
},
)
@ -1240,7 +1236,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
-Version: "v1.21.3",
+Version: "v1.22.1",
},
},
testing.Step{