merging upstream changes

prosper-sre 2025-03-27 13:00:41 -07:00
commit 4ed1f82934
161 changed files with 22768 additions and 9672 deletions

View File

@ -188,6 +188,19 @@ runs:
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather listener logs
shell: bash
if: always()
run: |
LISTENER_POD="$(kubectl get autoscalinglisteners.actions.github.com -n arc-systems -o jsonpath='{.items[*].metadata.name}')"
kubectl logs $LISTENER_POD -n ${{inputs.arc-controller-namespace}}
- name: Gather coredns logs
shell: bash
if: always()
run: |
kubectl logs deployments/coredns -n kube-system
- name: cleanup
if: inputs.wait-to-finish == 'true'
shell: bash
@ -195,8 +208,8 @@ runs:
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
- name: Gather logs and cleanup
- name: Gather controller logs
shell: bash
if: always()
run: |
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}

View File

@ -9,3 +9,15 @@ updates:
directory: "/" # Location of package manifests
schedule:
interval: "weekly"
groups:
gomod:
patterns:
- "*"
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: "weekly"
groups:
actions:
patterns:
- "*"

View File

@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.9.1"
IMAGE_VERSION: "0.11.0"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@ -103,6 +103,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -194,6 +196,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -284,6 +288,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -383,6 +389,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -484,6 +492,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -579,6 +589,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -699,6 +711,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@ -789,6 +803,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Trigger long running jobs and wait for runners to pick them up
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10

View File

@ -18,7 +18,7 @@ on:
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.16.1
HELM_VERSION: v3.8.0
HELM_VERSION: v3.17.0
permissions:
contents: read
@ -46,22 +46,6 @@ jobs:
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
@ -123,3 +107,17 @@ jobs:
if: steps.list-changed.outputs.changed == 'true'
run: |
ct install --config charts/.ci/ct-config-gha.yaml
test-chart:
name: Test Chart
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: "go.mod"
cache: false
- name: Test gha-runner-scale-set
run: go test ./charts/gha-runner-scale-set/...
- name: Test gha-runner-scale-set-controller
run: go test ./charts/gha-runner-scale-set-controller/...
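
For reference, a minimal sketch of the kind of Go-based chart test these two steps run, assuming helm is available on PATH; the chart path and the asserted string are illustrative, not the repository's actual test fixtures.

package charts_test

import (
	"os/exec"
	"strings"
	"testing"
)

// TestRenderedNamespace renders the controller chart and checks that the
// requested namespace appears in the output. Illustrative only; the real
// tests under ./charts/... ship their own helpers and fixtures.
func TestRenderedNamespace(t *testing.T) {
	out, err := exec.Command(
		"helm", "template", "arc", "./charts/gha-runner-scale-set-controller",
		"--namespace", "arc-systems",
	).CombinedOutput()
	if err != nil {
		t.Fatalf("helm template failed: %v\n%s", err, out)
	}
	if !strings.Contains(string(out), "namespace: arc-systems") {
		t.Errorf("expected rendered manifests to target namespace arc-systems")
	}
}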

View File

@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.22.4 as builder
FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder
WORKDIR /workspace
@ -37,7 +37,6 @@ RUN --mount=target=. \
--mount=type=cache,mode=0777,target=${GOCACHE} \
export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
@ -52,7 +51,6 @@ WORKDIR /
COPY --from=builder /out/manager .
COPY --from=builder /out/github-webhook-server .
COPY --from=builder /out/actions-metrics-server .
COPY --from=builder /out/github-runnerscaleset-listener .
COPY --from=builder /out/ghalistener .
COPY --from=builder /out/sleep .
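
The -X flags in the build stage above inject version metadata into the build package at link time. A hedged sketch of how a binary surfaces those values (Version and CommitSHA are the package-level variables named in the flags):

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/build"
)

func main() {
	// Both values are empty unless set at build time via
	// -ldflags "-X '...build.Version=...' -X '...build.CommitSHA=...'".
	fmt.Printf("manager %s (commit %s)\n", build.Version, build.CommitSHA)
}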

View File

@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.319.1
RUNNER_VERSION ?= 2.323.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@ -23,7 +23,7 @@ KUBE_RBAC_PROXY_VERSION ?= v0.11.0
SHELLCHECK_VERSION ?= 0.8.0
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true"
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@ -87,7 +87,7 @@ test-with-deps: kube-apiserver etcd kubectl
# Build manager binary
manager: generate fmt vet
go build -o bin/manager main.go
go build -o bin/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener
go build -o bin/github-runnerscaleset-listener ./cmd/ghalistener
# Run against the configured Kubernetes cluster in ~/.kube/config
run: generate fmt vet manifests
@ -310,7 +310,7 @@ github-release: release
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.1 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen:
ifeq (, $(shell which controller-gen))
@ -320,7 +320,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0 ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.2 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
endif

View File

@ -11,21 +11,22 @@ Actions Runner Controller (ARC) is a Kubernetes operator that orchestrates and s
With ARC, you can create runner scale sets that automatically scale based on the number of workflows running in your repository, organization, or enterprise. Because controlled runners can be ephemeral and based on containers, new runner instances can scale up or down rapidly and cleanly. For more information about autoscaling, see ["Autoscaling with self-hosted runners."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/autoscaling-with-self-hosted-runners)
You can set up ARC on Kubernetes using Helm, then create and run a workflow that uses runner scale sets. For more information about runner scale sets, see ["Deploying runner scale sets with Actions Runner Controller."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)
## People
Actions Runner Controller (ARC) is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.
In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
If you are already the employer of one of the contributors, sponsoring via GitHub Sponsors might not be an option. Just support them by other means!
See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.
## Getting Started
To give ARC a try with just a handful of commands, Please refer to the [Quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
To give ARC a try with just a handful of commands, please refer to the [Quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).
For an overview of ARC, please refer to [About ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller)
For an overview of ARC, please refer to [About ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller).
With the introduction of [autoscaling runner scale sets](https://github.com/actions/actions-runner-controller/discussions/2775), the existing [autoscaling modes](./docs/automatically-scaling-runners.md) are now legacy. The legacy modes have certain use cases and will continue to be maintained by the community only.
@ -37,7 +38,7 @@ ARC documentation is available on [docs.github.com](https://docs.github.com/en/a
### Legacy documentation
The following documentation is for the legacy autoscaling modes that continue to be maintained by the community
The following documentation is for the legacy autoscaling modes that continue to be maintained by the community:
- [Quickstart guide](/docs/quickstart.md)
- [About ARC](/docs/about-arc.md)

View File

@ -61,6 +61,9 @@ type AutoscalingListenerSpec struct {
// +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
// +optional
Metrics *MetricsConfig `json:"metrics,omitempty"`
// +optional
Template *corev1.PodTemplateSpec `json:"template,omitempty"`
}
@ -68,11 +71,11 @@ type AutoscalingListenerSpec struct {
// AutoscalingListenerStatus defines the observed state of AutoscalingListener
type AutoscalingListenerStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name=GitHub Configure URL,type=string
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetNamespace",name=AutoscalingRunnerSet Namespace,type=string
//+kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetName",name=AutoscalingRunnerSet Name,type=string
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name=GitHub Configure URL,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetNamespace",name=AutoscalingRunnerSet Namespace,type=string
// +kubebuilder:printcolumn:JSONPath=".spec.autoscalingRunnerSetName",name=AutoscalingRunnerSet Name,type=string
// AutoscalingListener is the Schema for the autoscalinglisteners API
type AutoscalingListener struct {
@ -83,7 +86,7 @@ type AutoscalingListener struct {
Status AutoscalingListenerStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// +kubebuilder:object:root=true
// AutoscalingListenerList contains a list of AutoscalingListener
type AutoscalingListenerList struct {

View File

@ -31,16 +31,16 @@ import (
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.minRunners",name=Minimum Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".spec.maxRunners",name=Maximum Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.currentRunners",name=Current Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string
// +kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// AutoscalingRunnerSet is the Schema for the autoscalingrunnersets API
type AutoscalingRunnerSet struct {
@ -74,6 +74,9 @@ type AutoscalingRunnerSetSpec struct {
// Required
Template corev1.PodTemplateSpec `json:"template,omitempty"`
// +optional
ListenerMetrics *MetricsConfig `json:"listenerMetrics,omitempty"`
// +optional
ListenerTemplate *corev1.PodTemplateSpec `json:"listenerTemplate,omitempty"`
@ -232,6 +235,32 @@ type ProxyServerConfig struct {
CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
}
// MetricsConfig holds configuration parameters for each metric type
type MetricsConfig struct {
// +optional
Counters map[string]*CounterMetric `json:"counters,omitempty"`
// +optional
Gauges map[string]*GaugeMetric `json:"gauges,omitempty"`
// +optional
Histograms map[string]*HistogramMetric `json:"histograms,omitempty"`
}
// CounterMetric holds configuration of a single metric of type Counter
type CounterMetric struct {
Labels []string `json:"labels"`
}
// GaugeMetric holds configuration of a single metric of type Gauge
type GaugeMetric struct {
Labels []string `json:"labels"`
}
// HistogramMetric holds configuration of a single metric of type Histogram
type HistogramMetric struct {
Labels []string `json:"labels"`
Buckets []float64 `json:"buckets,omitempty"`
}
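
To make the shape of the new configuration concrete, a hedged Go sketch of populating MetricsConfig; the metric names and labels here are hypothetical examples, and the import path assumes the apis/actions.github.com/v1alpha1 package of this repository.

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

func main() {
	// Each map key is a metric name; the value configures its labels
	// (and, for histograms, optional bucket boundaries).
	metrics := v1alpha1.MetricsConfig{
		Counters: map[string]*v1alpha1.CounterMetric{
			"gha_started_jobs_total": {Labels: []string{"repository", "job_name"}},
		},
		Gauges: map[string]*v1alpha1.GaugeMetric{
			"gha_assigned_jobs": {Labels: []string{"runner_scale_set_name"}},
		},
		Histograms: map[string]*v1alpha1.HistogramMetric{
			"gha_job_startup_duration_seconds": {
				Labels:  []string{"repository"},
				Buckets: []float64{1, 5, 10, 30, 60},
			},
		},
	}
	fmt.Printf("%d counters, %d gauges, %d histograms\n",
		len(metrics.Counters), len(metrics.Gauges), len(metrics.Histograms))
}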
// AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
type AutoscalingRunnerSetStatus struct {
// +optional
@ -242,7 +271,7 @@ type AutoscalingRunnerSetStatus struct {
// EphemeralRunner counts separated by the stage ephemeral runners are in, taken from the EphemeralRunnerSet
//+optional
// +optional
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
// +optional
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
@ -278,7 +307,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
return hash.ComputeTemplateHash(&spec)
}
//+kubebuilder:object:root=true
// +kubebuilder:object:root=true
// AutoscalingRunnerSetList contains a list of AutoscalingRunnerSet
type AutoscalingRunnerSetList struct {

View File

@ -21,8 +21,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// EphemeralRunnerContainerName is the name of the runner container.
// It represents the name of the container running the self-hosted runner image.
const EphemeralRunnerContainerName = "runner"
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.githubConfigUrl",name="GitHub Config URL",type=string
// +kubebuilder:printcolumn:JSONPath=".status.runnerId",name=RunnerId,type=number
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
@ -46,11 +50,25 @@ func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
}
func (er *EphemeralRunner) HasContainerHookConfigured() bool {
for i := range er.Spec.Spec.Containers {
if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName {
continue
}
for _, env := range er.Spec.Spec.Containers[i].Env {
if env.Name == "ACTIONS_RUNNER_CONTAINER_HOOKS" {
return true
}
}
return false
}
return false
}
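
A small usage sketch of the new helper: it returns true only when the container named by EphemeralRunnerContainerName sets the ACTIONS_RUNNER_CONTAINER_HOOKS environment variable. The hook path below is a hypothetical value.

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	er := &v1alpha1.EphemeralRunner{
		Spec: v1alpha1.EphemeralRunnerSpec{
			PodTemplateSpec: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name: v1alpha1.EphemeralRunnerContainerName,
						Env: []corev1.EnvVar{{
							Name:  "ACTIONS_RUNNER_CONTAINER_HOOKS",
							Value: "/home/runner/k8s/index.js", // hypothetical hook path
						}},
					}},
				},
			},
		},
	}
	fmt.Println(er.HasContainerHookConfigured()) // true
}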
// EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// +required
GitHubConfigUrl string `json:"githubConfigUrl,omitempty"`
@ -69,15 +87,11 @@ type EphemeralRunnerSpec struct {
// +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
// +required
corev1.PodTemplateSpec `json:",inline"`
}
// EphemeralRunnerStatus defines the observed state of EphemeralRunner
type EphemeralRunnerStatus struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Turns true only if the runner is online.
// +optional
Ready bool `json:"ready"`
@ -123,7 +137,7 @@ type EphemeralRunnerStatus struct {
JobDisplayName string `json:"jobDisplayName,omitempty"`
}
//+kubebuilder:object:root=true
// +kubebuilder:object:root=true
// EphemeralRunnerList contains a list of EphemeralRunner
type EphemeralRunnerList struct {

View File

@ -26,7 +26,7 @@ type EphemeralRunnerSetSpec struct {
Replicas int `json:"replicas,omitempty"`
// PatchID is the unique identifier for the patch issued by the listener app
PatchID int `json:"patchID"`
// EphemeralRunnerSpec is the spec of the ephemeral runner
EphemeralRunnerSpec EphemeralRunnerSpec `json:"ephemeralRunnerSpec,omitempty"`
}
@ -34,9 +34,6 @@ type EphemeralRunnerSetSpec struct {
type EphemeralRunnerSetStatus struct {
// CurrentReplicas is the number of currently running EphemeralRunner resources being managed by this EphemeralRunnerSet.
CurrentReplicas int `json:"currentReplicas"`
// EphemeralRunner counts separated by the stage ephemeral runners are in
// +optional
PendingEphemeralRunners int `json:"pendingEphemeralRunners"`
// +optional
@ -49,10 +46,10 @@ type EphemeralRunnerSetStatus struct {
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"
// +kubebuilder:printcolumn:JSONPath=".status.currentReplicas", name="CurrentReplicas",type="integer"
//+kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
//+kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.pendingEphemeralRunners",name=Pending Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.runningEphemeralRunners",name=Running Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.finishedEphemeralRunners",name=Finished Runners,type=integer
// +kubebuilder:printcolumn:JSONPath=".status.deletingEphemeralRunners",name=Deleting Runners,type=integer
// EphemeralRunnerSet is the Schema for the ephemeralrunnersets API
type EphemeralRunnerSet struct {
@ -63,7 +60,7 @@ type EphemeralRunnerSet struct {
Status EphemeralRunnerSetStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// +kubebuilder:object:root=true
// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
type EphemeralRunnerSetList struct {

View File

@ -102,6 +102,11 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
*out = new(GitHubServerTLSConfig)
(*in).DeepCopyInto(*out)
}
if in.Metrics != nil {
in, out := &in.Metrics, &out.Metrics
*out = new(MetricsConfig)
(*in).DeepCopyInto(*out)
}
if in.Template != nil {
in, out := &in.Template, &out.Template
*out = new(v1.PodTemplateSpec)
@ -207,6 +212,11 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
if in.ListenerMetrics != nil {
in, out := &in.ListenerMetrics, &out.ListenerMetrics
*out = new(MetricsConfig)
(*in).DeepCopyInto(*out)
}
if in.ListenerTemplate != nil {
in, out := &in.ListenerTemplate, &out.ListenerTemplate
*out = new(v1.PodTemplateSpec)
@ -249,6 +259,26 @@ func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterMetric) DeepCopyInto(out *CounterMetric) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CounterMetric.
func (in *CounterMetric) DeepCopy() *CounterMetric {
if in == nil {
return nil
}
out := new(CounterMetric)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunner) DeepCopyInto(out *EphemeralRunner) {
*out = *in
@ -446,6 +476,26 @@ func (in *EphemeralRunnerStatus) DeepCopy() *EphemeralRunnerStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GaugeMetric) DeepCopyInto(out *GaugeMetric) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GaugeMetric.
func (in *GaugeMetric) DeepCopy() *GaugeMetric {
if in == nil {
return nil
}
out := new(GaugeMetric)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
*out = *in
@ -466,6 +516,94 @@ func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Buckets != nil {
in, out := &in.Buckets, &out.Buckets
*out = make([]float64, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HistogramMetric.
func (in *HistogramMetric) DeepCopy() *HistogramMetric {
if in == nil {
return nil
}
out := new(HistogramMetric)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) {
*out = *in
if in.Counters != nil {
in, out := &in.Counters, &out.Counters
*out = make(map[string]*CounterMetric, len(*in))
for key, val := range *in {
var outVal *CounterMetric
if val == nil {
(*out)[key] = nil
} else {
inVal := (*in)[key]
in, out := &inVal, &outVal
*out = new(CounterMetric)
(*in).DeepCopyInto(*out)
}
(*out)[key] = outVal
}
}
if in.Gauges != nil {
in, out := &in.Gauges, &out.Gauges
*out = make(map[string]*GaugeMetric, len(*in))
for key, val := range *in {
var outVal *GaugeMetric
if val == nil {
(*out)[key] = nil
} else {
inVal := (*in)[key]
in, out := &inVal, &outVal
*out = new(GaugeMetric)
(*in).DeepCopyInto(*out)
}
(*out)[key] = outVal
}
}
if in.Histograms != nil {
in, out := &in.Histograms, &out.Histograms
*out = make(map[string]*HistogramMetric, len(*in))
for key, val := range *in {
var outVal *HistogramMetric
if val == nil {
(*out)[key] = nil
} else {
inVal := (*in)[key]
in, out := &inVal, &outVal
*out = new(HistogramMetric)
(*in).DeepCopyInto(*out)
}
(*out)[key] = outVal
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig.
func (in *MetricsConfig) DeepCopy() *MetricsConfig {
if in == nil {
return nil
}
out := new(MetricsConfig)
in.DeepCopyInto(out)
return out
}
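
A hypothetical sanity test, not part of this commit, for what the generated copy above guarantees: the nested metric maps and slices in the copy are independent of the original.

package v1alpha1_test

import (
	"testing"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

func TestMetricsConfigDeepCopyIsIndependent(t *testing.T) {
	orig := &v1alpha1.MetricsConfig{
		Histograms: map[string]*v1alpha1.HistogramMetric{
			"h": {Labels: []string{"repository"}, Buckets: []float64{1, 5}},
		},
	}
	cp := orig.DeepCopy()
	cp.Histograms["h"].Buckets[0] = 99 // mutate the copy only
	if orig.Histograms["h"].Buckets[0] != 1 {
		t.Fatal("DeepCopy shared the Buckets slice with the original")
	}
}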
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
*out = *in

View File

@ -317,19 +317,19 @@ type RunnerStatusRegistration struct {
type WorkVolumeClaimTemplate struct {
StorageClassName string `json:"storageClassName"`
AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes"`
Resources corev1.ResourceRequirements `json:"resources"`
Resources corev1.VolumeResourceRequirements `json:"resources"`
}
func (w *WorkVolumeClaimTemplate) validate() error {
if w.AccessModes == nil || len(w.AccessModes) == 0 {
return errors.New("Access mode should have at least one mode specified")
if len(w.AccessModes) == 0 {
return errors.New("access mode should have at least one mode specified")
}
for _, accessMode := range w.AccessModes {
switch accessMode {
case corev1.ReadWriteOnce, corev1.ReadWriteMany:
default:
return fmt.Errorf("Access mode %v is not supported", accessMode)
return fmt.Errorf("access mode %v is not supported", accessMode)
}
}
return nil
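
With the switch to corev1.VolumeResourceRequirements, a work volume claim template is populated as in this sketch; the storage class and size are placeholders, and the import path assumes the apis/actions.summerwind.net/v1alpha1 package.

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	wv := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "standard", // placeholder storage class
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.VolumeResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
		},
	}
	fmt.Printf("requesting %s for the work volume\n", wv.Resources.Requests.Storage())
}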

View File

@ -17,6 +17,9 @@ limitations under the License.
package v1alpha1
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -32,36 +35,51 @@ var runnerLog = logf.Log.WithName("runner-resource")
func (r *Runner) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
WithDefaulter(&RunnerDefaulter{}).
WithValidator(&RunnerValidator{}).
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Defaulter = &Runner{}
var _ webhook.CustomDefaulter = &RunnerDefaulter{}
type RunnerDefaulter struct{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *Runner) Default() {
func (*RunnerDefaulter) Default(ctx context.Context, obj runtime.Object) error {
// Nothing to do.
return nil
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Validator = &Runner{}
var _ webhook.CustomValidator = &RunnerValidator{}
type RunnerValidator struct{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateCreate() (admission.Warnings, error) {
func (*RunnerValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*Runner)
if !ok {
return nil, fmt.Errorf("expected Runner object, got %T", obj)
}
runnerLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (*RunnerValidator) ValidateUpdate(ctx context.Context, old, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*Runner)
if !ok {
return nil, fmt.Errorf("expected Runner object, got %T", obj)
}
runnerLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *Runner) ValidateDelete() (admission.Warnings, error) {
func (*RunnerValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
return nil, nil
}
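
Because the validator now receives a plain runtime.Object, the type assertion above is the only guard against the wrong kind. A hypothetical test, not part of this commit, exercising that mismatch path:

package v1alpha1_test

import (
	"context"
	"testing"

	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
)

func TestRunnerValidatorRejectsWrongType(t *testing.T) {
	v := &v1alpha1.RunnerValidator{}
	// A RunnerDeployment is a valid runtime.Object but not a *Runner.
	if _, err := v.ValidateCreate(context.Background(), &v1alpha1.RunnerDeployment{}); err == nil {
		t.Fatal("expected a type-mismatch error")
	}
}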

View File

@ -17,6 +17,9 @@ limitations under the License.
package v1alpha1
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -32,36 +35,51 @@ var runnerDeploymentLog = logf.Log.WithName("runnerdeployment-resource")
func (r *RunnerDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
WithDefaulter(&RunnerDeploymentDefaulter{}).
WithValidator(&RunnerDeploymentValidator{}).
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Defaulter = &RunnerDeployment{}
var _ webhook.CustomDefaulter = &RunnerDeploymentDefaulter{}
type RunnerDeploymentDefaulter struct{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *RunnerDeployment) Default() {
func (*RunnerDeploymentDefaulter) Default(context.Context, runtime.Object) error {
// Nothing to do.
return nil
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Validator = &RunnerDeployment{}
var _ webhook.CustomValidator = &RunnerDeploymentValidator{}
type RunnerDeploymentValidator struct{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateCreate() (admission.Warnings, error) {
func (*RunnerDeploymentValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*RunnerDeployment)
if !ok {
return nil, fmt.Errorf("expected RunnerDeployment object, got %T", obj)
}
runnerDeploymentLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (*RunnerDeploymentValidator) ValidateUpdate(ctx context.Context, old, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*RunnerDeployment)
if !ok {
return nil, fmt.Errorf("expected RunnerDeployment object, got %T", obj)
}
runnerDeploymentLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerDeployment) ValidateDelete() (admission.Warnings, error) {
func (*RunnerDeploymentValidator) ValidateDelete(context.Context, runtime.Object) (admission.Warnings, error) {
return nil, nil
}

View File

@ -17,6 +17,9 @@ limitations under the License.
package v1alpha1
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -32,36 +35,51 @@ var runnerReplicaSetLog = logf.Log.WithName("runnerreplicaset-resource")
func (r *RunnerReplicaSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(r).
WithDefaulter(&RunnerReplicaSetDefaulter{}).
WithValidator(&RunnerReplicaSetValidator{}).
Complete()
}
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Defaulter = &RunnerReplicaSet{}
var _ webhook.CustomDefaulter = &RunnerReplicaSetDefaulter{}
type RunnerReplicaSetDefaulter struct{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *RunnerReplicaSet) Default() {
func (*RunnerReplicaSetDefaulter) Default(context.Context, runtime.Object) error {
// Nothing to do.
return nil
}
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
var _ webhook.Validator = &RunnerReplicaSet{}
var _ webhook.CustomValidator = &RunnerReplicaSetValidator{}
type RunnerReplicaSetValidator struct{}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateCreate() (admission.Warnings, error) {
func (*RunnerReplicaSetValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*RunnerReplicaSet)
if !ok {
return nil, fmt.Errorf("expected RunnerReplicaSet object, got %T", obj)
}
runnerReplicaSetLog.Info("validate resource to be created", "name", r.Name)
return nil, r.Validate()
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
func (*RunnerReplicaSetValidator) ValidateUpdate(ctx context.Context, old, obj runtime.Object) (admission.Warnings, error) {
r, ok := obj.(*RunnerReplicaSet)
if !ok {
return nil, fmt.Errorf("expected RunnerReplicaSet object, got %T", obj)
}
runnerReplicaSetLog.Info("validate resource to be updated", "name", r.Name)
return nil, r.Validate()
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
func (r *RunnerReplicaSet) ValidateDelete() (admission.Warnings, error) {
func (*RunnerReplicaSetValidator) ValidateDelete(context.Context, runtime.Object) (admission.Warnings, error) {
return nil, nil
}

View File

@ -467,6 +467,21 @@ func (in *RunnerConfig) DeepCopy() *RunnerConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDefaulter) DeepCopyInto(out *RunnerDefaulter) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerDefaulter.
func (in *RunnerDefaulter) DeepCopy() *RunnerDefaulter {
if in == nil {
return nil
}
out := new(RunnerDefaulter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeployment) DeepCopyInto(out *RunnerDeployment) {
*out = *in
@ -494,6 +509,21 @@ func (in *RunnerDeployment) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeploymentDefaulter) DeepCopyInto(out *RunnerDeploymentDefaulter) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerDeploymentDefaulter.
func (in *RunnerDeploymentDefaulter) DeepCopy() *RunnerDeploymentDefaulter {
if in == nil {
return nil
}
out := new(RunnerDeploymentDefaulter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeploymentList) DeepCopyInto(out *RunnerDeploymentList) {
*out = *in
@ -596,6 +626,21 @@ func (in *RunnerDeploymentStatus) DeepCopy() *RunnerDeploymentStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerDeploymentValidator) DeepCopyInto(out *RunnerDeploymentValidator) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerDeploymentValidator.
func (in *RunnerDeploymentValidator) DeepCopy() *RunnerDeploymentValidator {
if in == nil {
return nil
}
out := new(RunnerDeploymentValidator)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerList) DeepCopyInto(out *RunnerList) {
*out = *in
@ -815,6 +860,21 @@ func (in *RunnerReplicaSet) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerReplicaSetDefaulter) DeepCopyInto(out *RunnerReplicaSetDefaulter) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetDefaulter.
func (in *RunnerReplicaSetDefaulter) DeepCopy() *RunnerReplicaSetDefaulter {
if in == nil {
return nil
}
out := new(RunnerReplicaSetDefaulter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerReplicaSetList) DeepCopyInto(out *RunnerReplicaSetList) {
*out = *in
@ -907,6 +967,21 @@ func (in *RunnerReplicaSetStatus) DeepCopy() *RunnerReplicaSetStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerReplicaSetValidator) DeepCopyInto(out *RunnerReplicaSetValidator) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetValidator.
func (in *RunnerReplicaSetValidator) DeepCopy() *RunnerReplicaSetValidator {
if in == nil {
return nil
}
out := new(RunnerReplicaSetValidator)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerSet) DeepCopyInto(out *RunnerSet) {
*out = *in
@ -1112,6 +1187,21 @@ func (in *RunnerTemplate) DeepCopy() *RunnerTemplate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunnerValidator) DeepCopyInto(out *RunnerValidator) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerValidator.
func (in *RunnerValidator) DeepCopy() *RunnerValidator {
if in == nil {
return nil
}
out := new(RunnerValidator)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleTargetRef) DeepCopyInto(out *ScaleTargetRef) {
*out = *in

View File

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
controller-gen.kubebuilder.io/version: v0.17.2
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@ -159,10 +159,8 @@ spec:
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook requested received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you

View File

@ -6,17 +6,17 @@
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "actions-runner-controller.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ include "actions-runner-controller.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "actions-runner-controller.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ include "actions-runner-controller.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "actions-runner-controller.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "actions-runner-controller.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
You can watch the status by running 'kubectl get --namespace {{ include "actions-runner-controller.namespace" . }} svc -w {{ include "actions-runner-controller.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ include "actions-runner-controller.namespace" . }} {{ include "actions-runner-controller.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "actions-runner-controller.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
export POD_NAME=$(kubectl get pods --namespace {{ include "actions-runner-controller.namespace" . }} -l "app.kubernetes.io/name={{ include "actions-runner-controller.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ include "actions-runner-controller.namespace" . }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
kubectl --namespace {{ include "actions-runner-controller.namespace" . }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@ -1,3 +1,14 @@
{{/*
Allow overriding the namespace for the resources.
*/}}
{{- define "actions-runner-controller.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
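
In plain Go, the helper's precedence reads as in this sketch: an explicit namespaceOverride value wins, otherwise the Helm release namespace is used.

package main

import "fmt"

// chartNamespace mirrors the actions-runner-controller.namespace helper.
func chartNamespace(namespaceOverride, releaseNamespace string) string {
	if namespaceOverride != "" {
		return namespaceOverride
	}
	return releaseNamespace
}

func main() {
	fmt.Println(chartNamespace("", "default"))           // default
	fmt.Println(chartNamespace("arc-system", "default")) // arc-system
}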
{{/*
Expand the name of the chart.
*/}}

View File

@ -3,7 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:

View File

@ -5,7 +5,7 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.actionsMetricsServer.ingress.annotations }}

View File

@ -10,5 +10,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
{{- end }}

View File

@ -4,7 +4,7 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.secretName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
type: Opaque

View File

@ -3,7 +3,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
{{- if .Values.actionsMetricsServer.service.annotations }}

View File

@ -4,7 +4,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.actionsMetricsServer.serviceAccount.annotations }}

View File

@ -1,5 +1,5 @@
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default (include "actions-runner-controller.namespace" .) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:

View File

@ -10,5 +10,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
{{- end }}

View File

@ -6,7 +6,7 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
spec:
selfSigned: {}
---
@ -14,11 +14,11 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "actions-runner-controller.servingCertName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
spec:
dnsNames:
- {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc
- {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc.cluster.local
- {{ include "actions-runner-controller.webhookServiceName" . }}.{{ include "actions-runner-controller.namespace" . }}.svc
- {{ include "actions-runner-controller.webhookServiceName" . }}.{{ include "actions-runner-controller.namespace" . }}.svc.cluster.local
issuerRef:
kind: Issuer
name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}

View File

@ -4,7 +4,7 @@ metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
name: {{ include "actions-runner-controller.metricsServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
{{- with .Values.metrics.serviceAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}

View File

@ -8,7 +8,7 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller.serviceMonitorName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
spec:
endpoints:
- path: /metrics

View File

@ -5,7 +5,7 @@ metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
name: {{ include "actions-runner-controller.pdbName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}

View File

@ -2,7 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "actions-runner-controller.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
@ -56,7 +56,7 @@ spec:
- "--docker-registry-mirror={{ .Values.dockerRegistryMirror }}"
{{- end }}
{{- if .Values.scope.singleNamespace }}
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
- "--watch-namespace={{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}"
{{- end }}
{{- if .Values.logLevel }}
- "--log-level={{ .Values.logLevel }}"

View File

@ -3,7 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
@ -43,7 +43,7 @@ spec:
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
{{- end }}
{{- if .Values.scope.singleNamespace }}
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
- "--watch-namespace={{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}"
{{- end }}
{{- if .Values.runnerGithubURL }}
- "--runner-github-url={{ .Values.runnerGithubURL }}"

View File

@ -5,7 +5,7 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.githubWebhookServer.ingress.annotations }}

View File

@ -5,7 +5,7 @@ metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
name: {{ include "actions-runner-controller-github-webhook-server.pdbName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
spec:
{{- if .Values.githubWebhookServer.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.githubWebhookServer.podDisruptionBudget.minAvailable }}

View File

@ -10,5 +10,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
{{- end }}

View File

@ -4,7 +4,7 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
type: Opaque

View File

@ -3,7 +3,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
{{- if .Values.githubWebhookServer.service.annotations }}

View File

@ -1,5 +1,5 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default .Release.Namespace }}
{{- $servicemonitornamespace := .Values.actionsMetrics.serviceMonitor.namespace | default (include "actions-runner-controller.namespace" .) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:

View File

@ -4,7 +4,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.githubWebhookServer.serviceAccount.annotations }}

View File

@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "actions-runner-controller.leaderElectionRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
rules:
- apiGroups:
- ""

View File

@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "actions-runner-controller.leaderElectionRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -10,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}

View File

@ -9,4 +9,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}

View File

@ -6,7 +6,7 @@ kind: ClusterRoleBinding
{{- end }}
metadata:
name: {{ include "actions-runner-controller.managerRoleName" . }}-secrets
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if .Values.scope.singleNamespace }}
@ -18,4 +18,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}

View File

@ -3,7 +3,7 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller.secretName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
{{- if .Values.authSecret.annotations }}
annotations:
{{ toYaml .Values.authSecret.annotations | nindent 4 }}

View File

@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "actions-runner-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}

View File

@ -2,7 +2,7 @@
We will use a self managed CA if one is not provided by cert-manager
*/}}
{{- $ca := genCA "actions-runner-ca" 3650 }}
{{- $cert := genSignedCert (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) .Release.Namespace) nil (list (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) .Release.Namespace)) 3650 $ca }}
{{- $cert := genSignedCert (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) (include "actions-runner-controller.namespace" .)) nil (list (printf "%s.%s.svc" (include "actions-runner-controller.webhookServiceName" .) (include "actions-runner-controller.namespace" .))) 3650 $ca }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
@ -11,7 +11,7 @@ metadata:
name: {{ include "actions-runner-controller.fullname" . }}-mutating-webhook-configuration
{{- if .Values.certManagerEnabled }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
cert-manager.io/inject-ca-from: {{ include "actions-runner-controller.namespace" . }}/{{ include "actions-runner-controller.servingCertName" . }}
{{- end }}
webhooks:
- admissionReviewVersions:
@ -19,7 +19,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -29,7 +29,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /mutate-actions-summerwind-dev-v1alpha1-runner
failurePolicy: Fail
name: mutate.runner.actions.summerwind.dev
@ -50,7 +50,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -60,7 +60,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /mutate-actions-summerwind-dev-v1alpha1-runnerdeployment
failurePolicy: Fail
name: mutate.runnerdeployment.actions.summerwind.dev
@ -81,7 +81,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -91,7 +91,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset
failurePolicy: Fail
name: mutate.runnerreplicaset.actions.summerwind.dev
@ -112,7 +112,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -122,7 +122,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /mutate-runner-set-pod
failurePolicy: Fail
name: mutate-runner-pod.webhook.actions.summerwind.dev
@ -148,7 +148,7 @@ metadata:
name: {{ include "actions-runner-controller.fullname" . }}-validating-webhook-configuration
{{- if .Values.certManagerEnabled }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
cert-manager.io/inject-ca-from: {{ include "actions-runner-controller.namespace" . }}/{{ include "actions-runner-controller.servingCertName" . }}
{{- end }}
webhooks:
- admissionReviewVersions:
@ -156,7 +156,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -166,7 +166,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /validate-actions-summerwind-dev-v1alpha1-runner
failurePolicy: Fail
name: validate.runner.actions.summerwind.dev
@ -187,7 +187,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -197,7 +197,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /validate-actions-summerwind-dev-v1alpha1-runnerdeployment
failurePolicy: Fail
name: validate.runnerdeployment.actions.summerwind.dev
@ -218,7 +218,7 @@ webhooks:
{{- if .Values.scope.singleNamespace }}
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ default .Release.Namespace .Values.scope.watchNamespace }}
kubernetes.io/metadata.name: {{ default (include "actions-runner-controller.namespace" .) .Values.scope.watchNamespace }}
{{- end }}
clientConfig:
{{- if .Values.admissionWebHooks.caBundle }}
@ -228,7 +228,7 @@ webhooks:
{{- end }}
service:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
path: /validate-actions-summerwind-dev-v1alpha1-runnerreplicaset
failurePolicy: Fail
name: validate.runnerreplicaset.actions.summerwind.dev
@ -250,7 +250,7 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ include "actions-runner-controller.servingCertName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
type: kubernetes.io/tls

View File

@ -2,7 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "actions-runner-controller.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "actions-runner-controller.namespace" . }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}

View File

@ -420,3 +420,6 @@ actionsMetricsServer:
# - chart-example.local
terminationGracePeriodSeconds: 10
lifecycle: {}
# Add the option to deploy in another namespace rather than .Release.Namespace.
namespaceOverride: ""

View File

@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
version: 0.11.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.1"
appVersion: "0.11.0"
home: https://github.com/actions/actions-runner-controller

View File

@ -1,5 +1,3 @@
Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.
WARNING: Older version of the listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.

View File

@ -7,6 +7,17 @@ Expand the name of the chart.
gha-rs-controller
{{- end }}
{{/*
Allow overriding the namespace for the resources.
*/}}
{{- define "gha-runner-scale-set-controller.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set-controller.name" -}}
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
@ -57,7 +68,7 @@ Selector labels
*/}}
{{- define "gha-runner-scale-set-controller.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }}
app.kubernetes.io/namespace: {{ .Release.Namespace }}
app.kubernetes.io/namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
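When namespaceOverride is empty, the helper above falls back to .Release.Namespace, so existing installs render unchanged. A quick check, assuming the chart sits at charts/gha-runner-scale-set-controller:

helm template test charts/gha-runner-scale-set-controller \
  --namespace arc-systems \
  --show-only templates/deployment.yaml | grep "namespace:"
# Expect "namespace: arc-systems" (the release namespace), since no override is set.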

View File

@ -2,10 +2,10 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "gha-runner-scale-set-controller.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
labels:
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
{{- if .Values.flags.watchSingleNamespace }}
actions.github.com/controller-watch-single-namespace: {{ .Values.flags.watchSingleNamespace }}
@ -25,7 +25,7 @@ spec:
labels:
app.kubernetes.io/part-of: gha-rs-controller
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
app.kubernetes.io/version: {{ .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
@ -65,6 +65,9 @@ spec:
{{- with .Values.flags.watchSingleNamespace }}
- "--watch-single-namespace={{ . }}"
{{- end }}
{{- with .Values.flags.runnerMaxConcurrentReconciles }}
- "--runner-max-concurrent-reconciles={{ . }}"
{{- end }}
{{- with .Values.flags.updateStrategy }}
- "--update-strategy={{ . }}"
{{- end }}
@ -82,6 +85,12 @@ spec:
{{- range .Values.flags.excludeLabelPropagationPrefixes }}
- "--exclude-label-propagation-prefix={{ . }}"
{{- end }}
{{- with .Values.flags.k8sClientRateLimiterQPS }}
- "--k8s-client-rate-limiter-qps={{ . }}"
{{- end }}
{{- with .Values.flags.k8sClientRateLimiterBurst }}
- "--k8s-client-rate-limiter-burst={{ . }}"
{{- end }}
command:
- "/manager"
{{- with .Values.metrics }}
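A hedged render check that the new reconciler flag lands in the manager args (chart path and release name are assumptions):

helm template test charts/gha-runner-scale-set-controller \
  --set flags.runnerMaxConcurrentReconciles=5 \
  --show-only templates/deployment.yaml \
  | grep "runner-max-concurrent-reconciles"
# Expect: --runner-max-concurrent-reconciles=5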

View File

@ -4,9 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end }}
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
{{- end }}

View File

@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.leaderElectionRoleBinding" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -11,5 +11,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
{{- end }}

View File

@ -10,5 +10,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
{{- end }}

View File

@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
rules:
- apiGroups:
- ""

View File

@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerListenerRoleBinding" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -10,4 +10,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}

View File

@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
rules:
- apiGroups:
- actions.github.com

View File

@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@ -11,5 +11,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
{{- end }}

View File

@ -11,5 +11,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
{{- end }}

View File

@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set-controller.namespace" . }}
labels:
{{- include "gha-runner-scale-set-controller.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}

View File

@ -17,6 +17,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Chart struct {
@ -366,6 +367,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
"--metrics-addr=0",
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--runner-max-concurrent-reconciles=2",
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
@ -518,6 +520,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
"--runner-max-concurrent-reconciles=2",
}
assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)
@ -646,6 +649,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
"--runner-max-concurrent-reconciles=2",
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
@ -686,6 +690,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
"--runner-max-concurrent-reconciles=2",
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
@ -776,6 +781,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
"--runner-max-concurrent-reconciles=2",
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
@ -1073,3 +1079,146 @@ func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}
func TestNamespaceOverride(t *testing.T) {
t.Parallel()
chartPath := "../../gha-runner-scale-set-controller"
releaseName := "test"
releaseNamespace := "test-" + strings.ToLower(random.UniqueId())
namespaceOverride := "test-" + strings.ToLower(random.UniqueId())
tt := map[string]struct {
file string
options *helm.Options
wantNamespace string
}{
"deployment": {
file: "deployment.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"leader_election_role_binding": {
file: "leader_election_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"leader_election_role": {
file: "leader_election_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"manager_listener_role_binding": {
file: "manager_listener_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"manager_listener_role": {
file: "manager_listener_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"manager_single_namespace_controller_role": {
file: "manager_single_namespace_controller_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"flags.watchSingleNamespace": "true",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"manager_single_namespace_controller_role_binding": {
file: "manager_single_namespace_controller_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"flags.watchSingleNamespace": "true",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: namespaceOverride,
},
"manager_single_namespace_watch_role": {
file: "manager_single_namespace_watch_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"flags.watchSingleNamespace": "target-ns",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: "target-ns",
},
"manager_single_namespace_watch_role_binding": {
file: "manager_single_namespace_watch_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"flags.watchSingleNamespace": "target-ns",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
wantNamespace: "target-ns",
},
}
for name, tc := range tt {
c := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
templateFile := filepath.Join("./templates", c.file)
output, err := helm.RenderTemplateE(t, c.options, chartPath, releaseName, []string{templateFile})
if err != nil {
t.Errorf("Error rendering template %s from chart %s: %s", c.file, chartPath, err)
}
type object struct {
Metadata metav1.ObjectMeta
}
var renderedObject object
helm.UnmarshalK8SYaml(t, output, &renderedObject)
assert.Equal(t, tc.wantNamespace, renderedObject.Metadata.Namespace)
})
}
}

View File

@ -104,6 +104,11 @@ flags:
## Defaults to watch all namespaces when unset.
watchSingleNamespace: "prosper-gha-runners"
## The maximum number of concurrent reconciles which can be run by the EphemeralRunner controller.
# Increase this value to improve the throughput of the controller.
# It may also increase the load on the API server and the external service (e.g. GitHub API).
runnerMaxConcurrentReconciles: 2
## Defines how the controller should handle upgrades while having running jobs.
##
## The strategies available are:
@ -128,3 +133,10 @@ flags:
## Labels that match prefix specified in the list are excluded from propagation.
# excludeLabelPropagationPrefixes:
# - "argocd.argoproj.io/instance"
# Overrides the default `.Release.Namespace` for all resources in this chart.
namespaceOverride: ""
## Defines the K8s client rate limiter parameters.
# k8sClientRateLimiterQPS: 20
# k8sClientRateLimiterBurst: 30
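The two rate-limiter knobs map onto the standard client-go token-bucket limiter: QPS is the sustained request rate, burst the short-term peak. An install-time sketch with illustrative numbers (release name and namespace assumed):

helm upgrade --install arc charts/gha-runner-scale-set-controller \
  --namespace arc-systems --create-namespace \
  --set flags.k8sClientRateLimiterQPS=20 \
  --set flags.k8sClientRateLimiterBurst=30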

View File

@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
version: 0.11.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.1"
appVersion: "0.11.0"
home: https://github.com/actions/actions-runner-controller

View File

@ -43,7 +43,7 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-rs
actions.github.com/scale-set-name: {{ include "gha-runner-scale-set.scale-set-name" . }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
actions.github.com/scale-set-namespace: {{ include "gha-runner-scale-set.namespace" . }}
{{- end }}
{{/*
@ -87,7 +87,7 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }}
{{- if eq $val.name "runner" }}
image: {{ $val.image }}
command: ["cp"]
args: ["-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
args: ["-r", "/home/runner/externals/.", "/home/runner/tmpDir/"]
volumeMounts:
- name: dind-externals
mountPath: /home/runner/tmpDir
@ -138,7 +138,7 @@ volumeMounts:
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
- {{ $volume | toYaml | nindent 2 }}
- {{ $volume | toYaml | nindent 2 | trim }}
{{- end }}
{{- end }}
{{- if eq $createWorkVolume 1 }}
@ -152,7 +152,7 @@ volumeMounts:
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 }}
- {{ $volume | toYaml | nindent 2 }}
- {{ $volume | toYaml | nindent 2 | trim }}
{{- end }}
{{- end }}
{{- if eq $createWorkVolume 1 }}
@ -167,7 +167,7 @@ volumeMounts:
{{- define "gha-runner-scale-set.non-work-volumes" -}}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if ne $volume.name "work" }}
- {{ $volume | toYaml | nindent 2 }}
- {{ $volume | toYaml | nindent 2 | trim }}
{{- end }}
{{- end }}
{{- end }}
@ -220,7 +220,7 @@ env:
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
- {{ $env | toYaml | nindent 4 | trim }}
{{- end }}
{{- end }}
{{- if $setDockerHost }}
@ -257,7 +257,7 @@ volumeMounts:
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
- {{ $volMount | toYaml | nindent 4 | trim }}
{{- end }}
{{- end }}
{{- if $mountWork }}
@ -483,8 +483,8 @@ volumeMounts:
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- if hasKey $singleNamespaceControllerDeployments (include "gha-runner-scale-set.namespace" .) }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments (include "gha-runner-scale-set.namespace" .) }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
@ -540,8 +540,8 @@ volumeMounts:
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else if gt $singleNamespaceCounter 0 }}
{{- if hasKey $singleNamespaceControllerDeployments .Release.Namespace }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments .Release.Namespace }}
{{- if hasKey $singleNamespaceControllerDeployments (include "gha-runner-scale-set.namespace" .) }}
{{- $controllerDeployment = get $singleNamespaceControllerDeployments (include "gha-runner-scale-set.namespace" .) }}
{{- with $controllerDeployment.metadata }}
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
@ -555,3 +555,11 @@ volumeMounts:
{{- $managerServiceAccountNamespace }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.namespace" -}}
{{- if .Values.namespaceOverride }}
{{- .Values.namespaceOverride }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
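Same pattern as the controller chart: the runner scale set now resolves its namespace through this helper everywhere it previously used .Release.Namespace. A sketch with the chart's required values stubbed out (all values hypothetical):

helm template test-runners charts/gha-runner-scale-set \
  --namespace default \
  --set namespaceOverride=runners \
  --set githubConfigUrl=https://github.com/myorg \
  --set githubConfigSecret.github_token=gh_token12345 \
  --set controllerServiceAccount.name=arc \
  --set controllerServiceAccount.namespace=arc-system \
  --show-only templates/autoscalingrunnerset.yaml | grep "namespace:"
# Expect "namespace: runners"; the 63-character namespace limit still applies
# to the overridden value, per the autoscalingrunnerset template below.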

View File

@ -1,18 +1,35 @@
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.autoscalingRunnerSet) }}
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingRunnerSet
metadata:
{{- if or (not (include "gha-runner-scale-set.scale-set-name" .)) (gt (len (include "gha-runner-scale-set.scale-set-name" .)) 45) }}
{{ fail "Name must have up to 45 characters" }}
{{- end }}
{{- if gt (len .Release.Namespace) 63 }}
{{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }}
{{ fail "Namespace must have up to 63 characters" }}
{{- end }}
name: {{ include "gha-runner-scale-set.scale-set-name" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.autoscalingRunnerSet.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.autoscalingRunnerSet.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
{{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
@ -89,11 +106,16 @@ spec:
minRunners: {{ .Values.minRunners | int }}
{{- end }}
{{- with .Values.listenerTemplate}}
{{- with .Values.listenerTemplate }}
listenerTemplate:
{{- toYaml . | nindent 4}}
{{- end }}
{{- with .Values.listenerMetrics }}
listenerMetrics:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
{{- with .Values.template.metadata }}
metadata:

View File

@ -1,11 +1,29 @@
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.githubConfigSecret) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "gha-runner-scale-set.githubsecret" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.githubConfigSecret.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.githubConfigSecret.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
data:

View File

@ -1,11 +1,31 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRole.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRole.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
rules:

View File

@ -1,10 +1,31 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRoleBinding.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRoleBinding.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
roleRef:
@ -14,5 +35,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
{{- end }}

View File

@ -1,18 +1,33 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.containerMode.kubernetesModeServiceAccount }}
{{- with .Values.containerMode.kubernetesModeServiceAccount.annotations }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
{{- if or .Values.annotations $hasCustomResourceMeta }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeServiceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeServiceAccount.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
{{- end }}
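Note the restructuring above: service-account annotations formerly read from containerMode.kubernetesModeServiceAccount.annotations now come from .Values.annotations plus resourceMeta.kubernetesModeServiceAccount (the test fixture for the old path is deleted further down). A migration sketch reusing the sample role ARN from that removed fixture:

helm template test-runners charts/gha-runner-scale-set \
  --set containerMode.type=kubernetes \
  --set githubConfigUrl=https://github.com/myorg \
  --set githubConfigSecret.github_token=gh_token12345 \
  --set controllerServiceAccount.name=arc \
  --set controllerServiceAccount.namespace=arc-system \
  --set 'resourceMeta.kubernetesModeServiceAccount.annotations.eks\.amazonaws\.com/role-arn=arn:aws:iam::123456789012:role/sample-role' \
  --show-only templates/kube_mode_serviceaccount.yaml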

View File

@ -1,11 +1,29 @@
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.managerRole) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRole.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRole.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
rules:

View File

@ -1,11 +1,29 @@
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.managerRoleBinding) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRoleBinding.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role-binding
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRoleBinding.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
roleRef:

View File

@ -1,12 +1,30 @@
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.noPermissionServiceAccount) }}
{{- $containerMode := .Values.containerMode }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.noPermissionServiceAccount.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.noPermissionServiceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
finalizers:
- actions.github.com/cleanup-protection
{{- end }}

View File

@ -6,6 +6,8 @@ import (
"strings"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
"github.com/gruntwork-io/terratest/modules/helm"
@ -742,37 +744,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
}
func TestTemplateRenderedKubernetesModeServiceAccountAnnotations(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
testValuesPath, err := filepath.Abs("../tests/values_kubernetes_mode_service_account_annotations.yaml")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var sa corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &sa)
assert.Equal(t, "arn:aws:iam::123456789012:role/sample-role", sa.Annotations["eks.amazonaws.com/role-arn"], "Annotations should be arn:aws:iam::123456789012:role/sample-role")
}
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
t.Parallel()
@ -893,7 +864,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
assert.Equal(t, "-r -v /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
@ -2143,3 +2114,357 @@ func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
assert.NotEqual(t, firstHash, secondHash)
assert.LessOrEqual(t, len(secondHash), 63)
}
func TestCustomLabels(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"containerMode.type": "kubernetes",
"controllerServiceAccount.namespace": "arc-system",
`labels.argocd\.argoproj\.io/sync-wave`: `"1"`,
`labels.app\.kubernetes\.io/part-of`: "no-override", // this shouldn't be overwritten
"resourceMeta.autoscalingRunnerSet.labels.ars-custom": "ars-custom-value",
"resourceMeta.githubConfigSecret.labels.gh-custom": "gh-custom-value",
"resourceMeta.kubernetesModeRole.labels.kmr-custom": "kmr-custom-value",
"resourceMeta.kubernetesModeRoleBinding.labels.kmrb-custom": "kmrb-custom-value",
"resourceMeta.kubernetesModeServiceAccount.labels.kmsa-custom": "kmsa-custom-value",
"resourceMeta.managerRole.labels.mr-custom": "mr-custom-value",
"resourceMeta.managerRoleBinding.labels.mrb-custom": "mrb-custom-value",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
const targetLabel = "argocd.argoproj.io/sync-wave"
const wantCustomValue = `"1"`
const reservedLabel = "app.kubernetes.io/part-of"
const wantReservedValue = "gha-rs"
var githubSecret corev1.Secret
helm.UnmarshalK8SYaml(t, output, &githubSecret)
assert.Equal(t, wantCustomValue, githubSecret.Labels[targetLabel])
assert.Equal(t, wantReservedValue, githubSecret.Labels[reservedLabel])
assert.Equal(t, "gh-custom-value", githubSecret.Labels["gh-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)
assert.Equal(t, wantCustomValue, role.Labels[targetLabel])
assert.Equal(t, wantReservedValue, role.Labels[reservedLabel])
assert.Equal(t, "kmr-custom-value", role.Labels["kmr-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
var roleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, wantCustomValue, roleBinding.Labels[targetLabel])
assert.Equal(t, wantReservedValue, roleBinding.Labels[reservedLabel])
assert.Equal(t, "kmrb-custom-value", roleBinding.Labels["kmrb-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, wantCustomValue, ars.Labels[targetLabel])
assert.Equal(t, wantReservedValue, ars.Labels[reservedLabel])
assert.Equal(t, "ars-custom-value", ars.Labels["ars-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, wantCustomValue, serviceAccount.Labels[targetLabel])
assert.Equal(t, wantReservedValue, serviceAccount.Labels[reservedLabel])
assert.Equal(t, "kmsa-custom-value", serviceAccount.Labels["kmsa-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, wantCustomValue, managerRole.Labels[targetLabel])
assert.Equal(t, wantReservedValue, managerRole.Labels[reservedLabel])
assert.Equal(t, "mr-custom-value", managerRole.Labels["mr-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Equal(t, wantCustomValue, managerRoleBinding.Labels[targetLabel])
assert.Equal(t, wantReservedValue, managerRoleBinding.Labels[reservedLabel])
assert.Equal(t, "mrb-custom-value", managerRoleBinding.Labels["mrb-custom"])
options = &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
`labels.argocd\.argoproj\.io/sync-wave`: `"1"`,
"resourceMeta.noPermissionServiceAccount.labels.npsa-custom": "npsa-custom-value",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
var noPermissionServiceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &noPermissionServiceAccount)
assert.Equal(t, wantCustomValue, noPermissionServiceAccount.Labels[targetLabel])
assert.Equal(t, wantReservedValue, noPermissionServiceAccount.Labels[reservedLabel])
assert.Equal(t, "npsa-custom-value", noPermissionServiceAccount.Labels["npsa-custom"])
}
func TestCustomAnnotations(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
`annotations.argocd\.argoproj\.io/sync-wave`: `"1"`,
"resourceMeta.autoscalingRunnerSet.annotations.ars-custom": "ars-custom-value",
"resourceMeta.githubConfigSecret.annotations.gh-custom": "gh-custom-value",
"resourceMeta.kubernetesModeRole.annotations.kmr-custom": "kmr-custom-value",
"resourceMeta.kubernetesModeRoleBinding.annotations.kmrb-custom": "kmrb-custom-value",
"resourceMeta.kubernetesModeServiceAccount.annotations.kmsa-custom": "kmsa-custom-value",
"resourceMeta.managerRole.annotations.mr-custom": "mr-custom-value",
"resourceMeta.managerRoleBinding.annotations.mrb-custom": "mrb-custom-value",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
const targetAnnotations = "argocd.argoproj.io/sync-wave"
const wantCustomValue = `"1"`
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
var githubSecret corev1.Secret
helm.UnmarshalK8SYaml(t, output, &githubSecret)
assert.Equal(t, wantCustomValue, githubSecret.Annotations[targetAnnotations])
assert.Equal(t, "gh-custom-value", githubSecret.Annotations["gh-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)
assert.Equal(t, wantCustomValue, role.Annotations[targetAnnotations])
assert.Equal(t, "kmr-custom-value", role.Annotations["kmr-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
var roleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, wantCustomValue, roleBinding.Annotations[targetAnnotations])
assert.Equal(t, "kmrb-custom-value", roleBinding.Annotations["kmrb-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, wantCustomValue, ars.Annotations[targetAnnotations])
assert.Equal(t, "ars-custom-value", ars.Annotations["ars-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, wantCustomValue, serviceAccount.Annotations[targetAnnotations])
assert.Equal(t, "kmsa-custom-value", serviceAccount.Annotations["kmsa-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Equal(t, wantCustomValue, managerRole.Annotations[targetAnnotations])
assert.Equal(t, "mr-custom-value", managerRole.Annotations["mr-custom"])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Equal(t, wantCustomValue, managerRoleBinding.Annotations[targetAnnotations])
assert.Equal(t, "mrb-custom-value", managerRoleBinding.Annotations["mrb-custom"])
options = &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
`annotations.argocd\.argoproj\.io/sync-wave`: `"1"`,
"resourceMeta.noPermissionServiceAccount.annotations.npsa-custom": "npsa-custom-value",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
var noPermissionServiceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &noPermissionServiceAccount)
assert.Equal(t, wantCustomValue, noPermissionServiceAccount.Annotations[targetAnnotations])
assert.Equal(t, "npsa-custom-value", noPermissionServiceAccount.Annotations["npsa-custom"])
}
func TestNamespaceOverride(t *testing.T) {
t.Parallel()
chartPath := "../../gha-runner-scale-set"
releaseName := "test"
releaseNamespace := "test-" + strings.ToLower(random.UniqueId())
namespaceOverride := "test-" + strings.ToLower(random.UniqueId())
tt := map[string]struct {
file string
options *helm.Options
}{
"manager_role": {
file: "manager_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"manager_role_binding": {
file: "manager_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"no_permission_serviceaccount": {
file: "no_permission_serviceaccount.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"autoscalingrunnerset": {
file: "autoscalingrunnerset.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"githubsecret": {
file: "githubsecret.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_role": {
file: "kube_mode_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_role_binding": {
file: "kube_mode_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_serviceaccount": {
file: "kube_mode_serviceaccount.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
}
for name, tc := range tt {
c := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
templateFile := filepath.Join("./templates", c.file)
output, err := helm.RenderTemplateE(t, c.options, chartPath, releaseName, []string{templateFile})
if err != nil {
t.Errorf("Error rendering template %s from chart %s: %s", c.file, chartPath, err)
}
type object struct {
Metadata metav1.ObjectMeta
}
var renderedObject object
helm.UnmarshalK8SYaml(t, output, &renderedObject)
assert.Equal(t, namespaceOverride, renderedObject.Metadata.Namespace)
})
}
}

View File

@ -1,8 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
containerMode:
type: kubernetes
kubernetesModeServiceAccount:
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/sample-role

View File

@ -195,8 +195,8 @@ template:
limits:
cpu: "4"
memory: 8Gi
request:
cpu: "2"
request:
cpu: "2"
memory: 4Gi
## Optional controller service account that needs to have required Role and RoleBinding

View File

@ -23,7 +23,7 @@ type App struct {
// initialized fields
listener Listener
worker Worker
metrics metrics.ServerPublisher
metrics metrics.ServerExporter
}
//go:generate mockery --name Listener --output ./mocks --outpkg mocks --case underscore
@ -69,6 +69,8 @@ func New(config config.Config) (*App, error) {
Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint,
Logger: app.logger.WithName("metrics exporter"),
Metrics: *config.Metrics,
})
}

View File

@ -8,6 +8,7 @@ import (
"net/url"
"os"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
@ -16,22 +17,23 @@ import (
)
type Config struct {
ConfigureUrl string `json:"configureUrl"`
AppID int64 `json:"appID"`
AppInstallationID int64 `json:"appInstallationID"`
AppPrivateKey string `json:"appPrivateKey"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
MaxRunners int `json:"maxRunners"`
MinRunners int `json:"minRunners"`
RunnerScaleSetId int `json:"runnerScaleSetId"`
RunnerScaleSetName string `json:"runnerScaleSetName"`
ServerRootCA string `json:"serverRootCA"`
LogLevel string `json:"logLevel"`
LogFormat string `json:"logFormat"`
MetricsAddr string `json:"metricsAddr"`
MetricsEndpoint string `json:"metricsEndpoint"`
ConfigureUrl string `json:"configure_url"`
AppID int64 `json:"app_id"`
AppInstallationID int64 `json:"app_installation_id"`
AppPrivateKey string `json:"app_private_key"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
MaxRunners int `json:"max_runners"`
MinRunners int `json:"min_runners"`
RunnerScaleSetId int `json:"runner_scale_set_id"`
RunnerScaleSetName string `json:"runner_scale_set_name"`
ServerRootCA string `json:"server_root_ca"`
LogLevel string `json:"log_level"`
LogFormat string `json:"log_format"`
MetricsAddr string `json:"metrics_addr"`
MetricsEndpoint string `json:"metrics_endpoint"`
Metrics *v1alpha1.MetricsConfig `json:"metrics"`
}
func Read(path string) (Config, error) {
@ -46,14 +48,15 @@ func Read(path string) (Config, error) {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
}
if err := config.validate(); err != nil {
if err := config.Validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
}
return config, nil
}
func (c *Config) validate() error {
// Validate checks the configuration for errors.
func (c *Config) Validate() error {
if len(c.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
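
For reference, a minimal sketch of exercising the newly exported Validate from outside the package, mirroring the tests further down. The import path assumes the cmd/ghalistener/config package layout; field values are illustrative:

	package main

	import (
		"log"

		"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
	)

	func main() {
		cfg := config.Config{
			ConfigureUrl:                "https://github.com/org/repo",
			EphemeralRunnerSetNamespace: "namespace",
			EphemeralRunnerSetName:      "deployment",
			RunnerScaleSetId:            1,
			MinRunners:                  0,
			MaxRunners:                  2,
			Token:                       "token",
		}
		// Validate is now callable by external packages, e.g. the controller
		// that writes this config before launching the listener.
		if err := cfg.Validate(); err != nil {
			log.Fatalf("invalid listener config: %v", err)
		}
	}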

View File

@ -46,7 +46,7 @@ func TestCustomerServerRootCA(t *testing.T) {
require.NoError(t, err)
certsString = string(rootCA)
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.crt"))
require.NoError(t, err)
certsString = certsString + string(intermediate)

View File

@ -17,7 +17,7 @@ func TestConfigValidationMinMax(t *testing.T) {
MaxRunners: 2,
Token: "token",
}
err := config.validate()
err := config.Validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
@ -28,7 +28,7 @@ func TestConfigValidationMissingToken(t *testing.T) {
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
@ -42,7 +42,7 @@ func TestConfigValidationAppKey(t *testing.T) {
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
@ -58,7 +58,7 @@ func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
err := config.Validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
@ -74,7 +74,7 @@ func TestConfigValidation(t *testing.T) {
Token: "asdf",
}
err := config.validate()
err := config.Validate()
assert.NoError(t, err, "Expected no error")
}
@ -86,7 +86,7 @@ func TestConfigValidationConfigUrl(t *testing.T) {
RunnerScaleSetId: 1,
}
err := config.validate()
err := config.Validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

View File

@ -2,10 +2,12 @@ package metrics
import (
"context"
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
@ -19,153 +21,349 @@ const (
labelKeyOrganization = "organization"
labelKeyRepository = "repository"
labelKeyJobName = "job_name"
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
labelKeyRunnerID = "runner_id"
labelKeyRunnerName = "runner_name"
)
const githubScaleSetSubsystem = "gha"
// labels
var (
scaleSetLabels = []string{
labelKeyRunnerScaleSetName,
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyRunnerScaleSetNamespace,
}
jobLabels = []string{
labelKeyRepository,
labelKeyOrganization,
labelKeyEnterprise,
labelKeyJobName,
labelKeyJobWorkflowRef,
labelKeyEventName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
const (
githubScaleSetSubsystem = "gha"
githubScaleSetSubsystemPrefix = "gha_"
)
// Names of all metrics available on the listener
const (
MetricAssignedJobs = "gha_assigned_jobs"
MetricRunningJobs = "gha_running_jobs"
MetricRegisteredRunners = "gha_registered_runners"
MetricBusyRunners = "gha_busy_runners"
MetricMinRunners = "gha_min_runners"
MetricMaxRunners = "gha_max_runners"
MetricDesiredRunners = "gha_desired_runners"
MetricIdleRunners = "gha_idle_runners"
MetricStartedJobsTotal = "gha_started_jobs_total"
MetricCompletedJobsTotal = "gha_completed_jobs_total"
MetricJobStartupDurationSeconds = "gha_job_startup_duration_seconds"
MetricJobExecutionDurationSeconds = "gha_job_execution_duration_seconds"
)
type metricsHelpRegistry struct {
counters map[string]string
gauges map[string]string
histograms map[string]string
}
var metricsHelp = metricsHelpRegistry{
counters: map[string]string{
MetricStartedJobsTotal: "Total number of jobs started.",
MetricCompletedJobsTotal: "Total number of jobs completed.",
},
gauges: map[string]string{
MetricAssignedJobs: "Number of jobs assigned to this scale set.",
MetricRunningJobs: "Number of jobs running (or about to be run).",
MetricRegisteredRunners: "Number of runners registered by the scale set.",
MetricBusyRunners: "Number of registered runners running a job.",
MetricMinRunners: "Minimum number of runners.",
MetricMaxRunners: "Maximum number of runners.",
MetricDesiredRunners: "Number of runners desired by the scale set.",
MetricIdleRunners: "Number of registered runners not running a job.",
},
histograms: map[string]string{
MetricJobStartupDurationSeconds: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
MetricJobExecutionDurationSeconds: "Time spent executing workflow jobs by the scale set (in seconds).",
},
}
func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
	return prometheus.Labels{
		labelKeyEnterprise:     e.scaleSetLabels[labelKeyEnterprise],
		labelKeyOrganization:   jobBase.OwnerName,
		labelKeyRepository:     jobBase.RepositoryName,
		labelKeyJobName:        jobBase.JobDisplayName,
		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
		labelKeyEventName:      jobBase.EventName,
	}
}
func (e *exporter) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
	l := e.jobLabels(&msg.JobMessageBase)
	l[labelKeyJobResult] = msg.Result
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}
func (e *exporter) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
	l := e.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
}
//go:generate mockery --name ServerExporter --output ./mocks --outpkg mocks --case underscore
type ServerExporter interface {
Publisher
ListenAndServe(ctx context.Context) error
}
var (
assignedJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "assigned_jobs",
Help: "Number of jobs assigned to this scale set.",
},
scaleSetLabels,
)
runningJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "running_jobs",
Help: "Number of jobs running (or about to be run).",
},
scaleSetLabels,
)
registeredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "registered_runners",
Help: "Number of runners registered by the scale set.",
},
scaleSetLabels,
)
busyRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "busy_runners",
Help: "Number of registered runners running a job.",
},
scaleSetLabels,
)
minRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "min_runners",
Help: "Minimum number of runners.",
},
scaleSetLabels,
)
maxRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "max_runners",
Help: "Maximum number of runners.",
},
scaleSetLabels,
)
desiredRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "desired_runners",
Help: "Number of runners desired by the scale set.",
},
scaleSetLabels,
)
idleRunners = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: "idle_runners",
Help: "Number of registered runners not running a job.",
},
scaleSetLabels,
)
startedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: githubScaleSetSubsystem,
Name: "started_jobs_total",
Help: "Total number of jobs started.",
},
startedJobsTotalLabels,
)
completedJobsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "completed_jobs_total",
Help: "Total number of jobs completed.",
Subsystem: githubScaleSetSubsystem,
},
completedJobsTotalLabels,
)
jobStartupDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_startup_duration_seconds",
Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobStartupDurationLabels,
)
jobExecutionDurationSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: "job_execution_duration_seconds",
Help: "Time spent executing workflow jobs by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobExecutionDurationLabels,
)
_ Publisher = &discard{}
_ ServerExporter = &exporter{}
)
var runtimeBuckets []float64 = []float64{
var Discard Publisher = &discard{}
type exporter struct {
logger logr.Logger
scaleSetLabels prometheus.Labels
*metrics
srv *http.Server
}
type metrics struct {
counters map[string]*counterMetric
gauges map[string]*gaugeMetric
histograms map[string]*histogramMetric
}
type counterMetric struct {
counter *prometheus.CounterVec
config *v1alpha1.CounterMetric
}
type gaugeMetric struct {
gauge *prometheus.GaugeVec
config *v1alpha1.GaugeMetric
}
type histogramMetric struct {
histogram *prometheus.HistogramVec
config *v1alpha1.HistogramMetric
}
type ExporterConfig struct {
ScaleSetName string
ScaleSetNamespace string
Enterprise string
Organization string
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
Metrics v1alpha1.MetricsConfig
}
func NewExporter(config ExporterConfig) ServerExporter {
reg := prometheus.NewRegistry()
metrics := installMetrics(config.Metrics, reg, config.Logger)
mux := http.NewServeMux()
mux.Handle(
config.ServerEndpoint,
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
)
return &exporter{
logger: config.Logger.WithName("metrics"),
scaleSetLabels: prometheus.Labels{
labelKeyRunnerScaleSetName: config.ScaleSetName,
labelKeyRunnerScaleSetNamespace: config.ScaleSetNamespace,
labelKeyEnterprise: config.Enterprise,
labelKeyOrganization: config.Organization,
labelKeyRepository: config.Repository,
},
metrics: metrics,
srv: &http.Server{
Addr: config.ServerAddr,
Handler: mux,
},
}
}
var errUnknownMetricName = errors.New("unknown metric name")
func installMetrics(config v1alpha1.MetricsConfig, reg *prometheus.Registry, logger logr.Logger) *metrics {
logger.Info(
"Registering metrics",
"gauges",
config.Gauges,
"counters",
config.Counters,
"histograms",
config.Histograms,
)
metrics := &metrics{
counters: make(map[string]*counterMetric, len(config.Counters)),
gauges: make(map[string]*gaugeMetric, len(config.Gauges)),
histograms: make(map[string]*histogramMetric, len(config.Histograms)),
}
for name, cfg := range config.Gauges {
help, ok := metricsHelp.gauges[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "gauge")
continue
}
g := prometheus.V2.NewGaugeVec(prometheus.GaugeVecOpts{
GaugeOpts: prometheus.GaugeOpts{
Subsystem: githubScaleSetSubsystem,
Name: strings.TrimPrefix(name, githubScaleSetSubsystemPrefix),
Help: help,
},
VariableLabels: prometheus.UnconstrainedLabels(cfg.Labels),
})
reg.MustRegister(g)
metrics.gauges[name] = &gaugeMetric{
gauge: g,
config: cfg,
}
}
for name, cfg := range config.Counters {
help, ok := metricsHelp.counters[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "counter")
continue
}
c := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
CounterOpts: prometheus.CounterOpts{
Subsystem: githubScaleSetSubsystem,
Name: strings.TrimPrefix(name, githubScaleSetSubsystemPrefix),
Help: help,
},
VariableLabels: prometheus.UnconstrainedLabels(cfg.Labels),
})
reg.MustRegister(c)
metrics.counters[name] = &counterMetric{
counter: c,
config: cfg,
}
}
for name, cfg := range config.Histograms {
help, ok := metricsHelp.histograms[name]
if !ok {
logger.Error(errUnknownMetricName, "name", name, "kind", "histogram")
continue
}
buckets := defaultRuntimeBuckets
if len(cfg.Buckets) > 0 {
buckets = cfg.Buckets
}
h := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
HistogramOpts: prometheus.HistogramOpts{
Subsystem: githubScaleSetSubsystem,
Name: strings.TrimPrefix(name, githubScaleSetSubsystemPrefix),
Help: help,
Buckets: buckets,
},
VariableLabels: prometheus.UnconstrainedLabels(cfg.Labels),
})
cfg.Buckets = buckets
reg.MustRegister(h)
metrics.histograms[name] = &histogramMetric{
histogram: h,
config: cfg,
}
}
return metrics
}
func (e *exporter) ListenAndServe(ctx context.Context) error {
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
go func() {
<-ctx.Done()
e.logger.Info("stopping metrics server", "err", ctx.Err())
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
e.srv.Shutdown(ctx)
}()
return e.srv.ListenAndServe()
}
func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.gauges[name]
if !ok {
return
}
labels := make(prometheus.Labels, len(m.config.Labels))
for _, label := range m.config.Labels {
labels[label] = allLabels[label]
}
m.gauge.With(labels).Set(val)
}
func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
m, ok := e.metrics.counters[name]
if !ok {
return
}
labels := make(prometheus.Labels, len(m.config.Labels))
for _, label := range m.config.Labels {
labels[label] = allLabels[label]
}
m.counter.With(labels).Inc()
}
func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.histograms[name]
if !ok {
return
}
labels := make(prometheus.Labels, len(m.config.Labels))
for _, label := range m.config.Labels {
labels[label] = allLabels[label]
}
m.histogram.With(labels).Observe(val)
}
func (e *exporter) PublishStatic(min, max int) {
e.setGauge(MetricMaxRunners, e.scaleSetLabels, float64(max))
e.setGauge(MetricMinRunners, e.scaleSetLabels, float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(stats.TotalBusyRunners))
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l)
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}
func (e *exporter) PublishDesiredRunners(count int) {
e.setGauge(MetricDesiredRunners, e.scaleSetLabels, float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
var defaultRuntimeBuckets []float64 = []float64{
0.01,
0.05,
0.1,
@ -212,181 +410,3 @@ var runtimeBuckets []float64 = []float64{
3000,
3600,
}
type baseLabels struct {
scaleSetName string
scaleSetNamespace string
enterprise string
organization string
repository string
}
func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
return prometheus.Labels{
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyEventName: jobBase.EventName,
}
}
func (b *baseLabels) scaleSetLabels() prometheus.Labels {
return prometheus.Labels{
labelKeyRunnerScaleSetName: b.scaleSetName,
labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: b.organization,
labelKeyRepository: b.repository,
}
}
func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyJobResult] = msg.Result
l[labelKeyRunnerName] = msg.RunnerName
return l
}
func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
l := b.jobLabels(&msg.JobMessageBase)
l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
l[labelKeyRunnerName] = msg.RunnerName
return l
}
//go:generate mockery --name Publisher --output ./mocks --outpkg mocks --case underscore
type Publisher interface {
PublishStatic(min, max int)
PublishStatistics(stats *actions.RunnerScaleSetStatistic)
PublishJobStarted(msg *actions.JobStarted)
PublishJobCompleted(msg *actions.JobCompleted)
PublishDesiredRunners(count int)
}
//go:generate mockery --name ServerPublisher --output ./mocks --outpkg mocks --case underscore
type ServerPublisher interface {
Publisher
ListenAndServe(ctx context.Context) error
}
var (
_ Publisher = &discard{}
_ ServerPublisher = &exporter{}
)
var Discard Publisher = &discard{}
type exporter struct {
logger logr.Logger
baseLabels
srv *http.Server
}
type ExporterConfig struct {
ScaleSetName string
ScaleSetNamespace string
Enterprise string
Organization string
Repository string
ServerAddr string
ServerEndpoint string
Logger logr.Logger
}
func NewExporter(config ExporterConfig) ServerPublisher {
reg := prometheus.NewRegistry()
reg.MustRegister(
assignedJobs,
runningJobs,
registeredRunners,
busyRunners,
minRunners,
maxRunners,
desiredRunners,
idleRunners,
startedJobsTotal,
completedJobsTotal,
jobStartupDurationSeconds,
jobExecutionDurationSeconds,
)
mux := http.NewServeMux()
mux.Handle(
config.ServerEndpoint,
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
)
return &exporter{
logger: config.Logger.WithName("metrics"),
baseLabels: baseLabels{
scaleSetName: config.ScaleSetName,
scaleSetNamespace: config.ScaleSetNamespace,
enterprise: config.Enterprise,
organization: config.Organization,
repository: config.Repository,
},
srv: &http.Server{
Addr: config.ServerAddr,
Handler: mux,
},
}
}
func (e *exporter) ListenAndServe(ctx context.Context) error {
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
go func() {
<-ctx.Done()
e.logger.Info("stopping metrics server", "err", ctx.Err())
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
e.srv.Shutdown(ctx)
}()
return e.srv.ListenAndServe()
}
func (m *exporter) PublishStatic(min, max int) {
l := m.scaleSetLabels()
maxRunners.With(l).Set(float64(max))
minRunners.With(l).Set(float64(min))
}
func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
l := e.scaleSetLabels()
assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}
func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
startedJobsTotal.With(l).Inc()
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}
func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
completedJobsTotal.With(l).Inc()
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}
func (m *exporter) PublishDesiredRunners(count int) {
desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
}
type discard struct{}
func (*discard) PublishStatic(int, int) {}
func (*discard) PublishStatistics(*actions.RunnerScaleSetStatistic) {}
func (*discard) PublishJobStarted(*actions.JobStarted) {}
func (*discard) PublishJobCompleted(*actions.JobCompleted) {}
func (*discard) PublishDesiredRunners(int) {}
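
Taken together, the rewritten exporter registers only the metrics named in MetricsConfig and restricts each one to its configured label subset; unknown names are skipped with a logged error. A minimal usage sketch under those assumptions — the metrics package import path, addresses, and label choices are illustrative:

	package main

	import (
		"context"

		"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
		"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
		"github.com/go-logr/logr"
	)

	func main() {
		exp := metrics.NewExporter(metrics.ExporterConfig{
			ScaleSetName:      "my-scale-set",
			ScaleSetNamespace: "arc-runners",
			ServerAddr:        ":8080",
			ServerEndpoint:    "/metrics",
			Logger:            logr.Discard(),
			Metrics: v1alpha1.MetricsConfig{
				// Only the metrics named here are registered; the label list
				// per metric constrains which labels are attached.
				Gauges: map[string]*v1alpha1.GaugeMetric{
					metrics.MetricBusyRunners: {Labels: []string{"repository"}},
				},
				Counters: map[string]*v1alpha1.CounterMetric{
					metrics.MetricCompletedJobsTotal: {Labels: []string{"repository", "job_result"}},
				},
			},
		})
		// Blocks until the context is cancelled, then shuts the server down.
		_ = exp.ListenAndServe(context.Background())
	}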

View File

@ -0,0 +1,88 @@
package metrics
import (
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
func TestInstallMetrics(t *testing.T) {
metricsConfig := v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
// unknown metric shouldn't be registered
"gha_unknown": {
Labels: []string{labelKeyRepository},
},
// gauge metric shouldn't be registered from this section
MetricAssignedJobs: {
Labels: []string{labelKeyRepository},
},
// histogram metric shouldn't be registered from this section
MetricJobStartupDurationSeconds: {
Labels: []string{labelKeyRepository},
},
// counter metric should be registered
MetricStartedJobsTotal: {
Labels: []string{labelKeyRepository},
},
},
Gauges: map[string]*v1alpha1.GaugeMetric{
// unknown metric shouldn't be registered
"gha_unknown": {
Labels: []string{labelKeyRepository},
},
// counter metric shouldn't be registered from this section
MetricStartedJobsTotal: {
Labels: []string{labelKeyRepository},
},
// histogram metric shouldn't be registered from this section
MetricJobStartupDurationSeconds: {
Labels: []string{labelKeyRepository},
},
// gauge metric should be registered
MetricAssignedJobs: {
Labels: []string{labelKeyRepository},
},
},
Histograms: map[string]*v1alpha1.HistogramMetric{
// unknown metric shouldn't be registered
"gha_unknown": {
Labels: []string{labelKeyRepository},
},
// counter metric shouldn't be registered from this section
MetricStartedJobsTotal: {
Labels: []string{labelKeyRepository},
},
// gauge metric shouldn't be registered from this section
MetricAssignedJobs: {
Labels: []string{labelKeyRepository},
},
// histogram metric should be registered
MetricJobExecutionDurationSeconds: {
Labels: []string{labelKeyRepository},
Buckets: []float64{0.1, 1},
},
// histogram metric should be registered with default runtime buckets
MetricJobStartupDurationSeconds: {
Labels: []string{labelKeyRepository},
},
},
}
reg := prometheus.NewRegistry()
got := installMetrics(metricsConfig, reg, logr.Discard())
assert.Len(t, got.counters, 1)
assert.Len(t, got.gauges, 1)
assert.Len(t, got.histograms, 2)
assert.Equal(t, got.counters[MetricStartedJobsTotal].config, metricsConfig.Counters[MetricStartedJobsTotal])
assert.Equal(t, got.gauges[MetricAssignedJobs].config, metricsConfig.Gauges[MetricAssignedJobs])
assert.Equal(t, got.histograms[MetricJobExecutionDurationSeconds].config, metricsConfig.Histograms[MetricJobExecutionDurationSeconds])
duration := got.histograms[MetricJobStartupDurationSeconds]
assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels)
assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets)
}

View File

@ -1,129 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
type AutoScalerKubernetesManager struct {
*kubernetes.Clientset
logger logr.Logger
}
func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, error) {
conf, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(conf)
if err != nil {
return nil, err
}
var manager = &AutoScalerKubernetesManager{
Clientset: kubeClient,
logger: logger.WithName("KubernetesManager"),
}
return manager, nil
}
func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
original := &v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: -1,
},
}
originalJson, err := json.Marshal(original)
if err != nil {
k.logger.Error(err, "could not marshal empty ephemeral runner set")
}
patch := &v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: runnerCount,
},
}
patchJson, err := json.Marshal(patch)
if err != nil {
k.logger.Error(err, "could not marshal patch ephemeral runner set")
}
mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchJson)
if err != nil {
k.logger.Error(err, "could not create merge patch json for ephemeral runner set")
}
k.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
err = k.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", "actions.github.com", "v1alpha1").
Namespace(namespace).
Resource("EphemeralRunnerSets").
Name(resourceName).
Body([]byte(mergePatch)).
Do(ctx).
Into(patchedEphemeralRunnerSet)
if err != nil {
return fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
}
k.logger.Info("Ephemeral runner set scaled.", "namespace", namespace, "name", resourceName, "replicas", patchedEphemeralRunnerSet.Spec.Replicas)
return nil
}
func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
original := &v1alpha1.EphemeralRunner{}
originalJson, err := json.Marshal(original)
if err != nil {
return fmt.Errorf("could not marshal empty ephemeral runner, error: %w", err)
}
patch := &v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobRequestId,
JobRepositoryName: fmt.Sprintf("%s/%s", ownerName, repositoryName),
WorkflowRunId: workflowRunId,
JobWorkflowRef: jobWorkflowRef,
JobDisplayName: jobDisplayName,
},
}
patchedJson, err := json.Marshal(patch)
if err != nil {
return fmt.Errorf("could not marshal patched ephemeral runner, error: %w", err)
}
mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchedJson)
if err != nil {
k.logger.Error(err, "could not create merge patch json for ephemeral runner")
}
k.logger.Info("Created merge patch json for EphemeralRunner status update", "json", string(mergePatch))
patchedStatus := &v1alpha1.EphemeralRunner{}
err = k.RESTClient().
Patch(types.MergePatchType).
Prefix("apis", "actions.github.com", "v1alpha1").
Namespace(namespace).
Resource("EphemeralRunners").
Name(resourceName).
SubResource("status").
Body(mergePatch).
Do(ctx).
Into(patchedStatus)
if err != nil {
return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
}
return nil
}
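
The deleted manager built its updates as JSON merge patches via github.com/evanphx/json-patch. A self-contained sketch of that technique, independent of the removed types (the replica values are illustrative):

	package main

	import (
		"fmt"

		jsonpatch "github.com/evanphx/json-patch"
	)

	func main() {
		original := []byte(`{"spec":{"replicas":-1}}`)
		desired := []byte(`{"spec":{"replicas":5}}`)

		// CreateMergePatch keeps only the fields that differ between the two
		// documents, so the result here is {"spec":{"replicas":5}}.
		patch, err := jsonpatch.CreateMergePatch(original, desired)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(patch))
	}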

View File

@ -1,191 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"os"
"time"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
"github.com/pkg/errors"
)
const (
sessionCreationMaxRetryCount = 10
)
type devContextKey bool
var testIgnoreSleep devContextKey = true
type AutoScalerClient struct {
client actions.SessionService
logger logr.Logger
lastMessageId int64
initialMessage *actions.RunnerScaleSetMessage
}
func NewAutoScalerClient(
ctx context.Context,
client actions.ActionsService,
logger *logr.Logger,
runnerScaleSetId int,
options ...func(*AutoScalerClient),
) (*AutoScalerClient, error) {
listener := AutoScalerClient{
logger: logger.WithName("auto_scaler"),
}
session, initialMessage, err := createSession(ctx, &listener.logger, client, runnerScaleSetId)
if err != nil {
return nil, fmt.Errorf("fail to create session. %w", err)
}
listener.lastMessageId = 0
listener.initialMessage = initialMessage
listener.client = newSessionClient(client, logger, session)
for _, option := range options {
option(&listener)
}
return &listener, nil
}
func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
hostName, err := os.Hostname()
if err != nil {
hostName = uuid.New().String()
logger.Info("could not get hostname, fail back to a random string.", "fallback", hostName)
}
var runnerScaleSetSession *actions.RunnerScaleSetSession
var retryCount int
for {
runnerScaleSetSession, err = client.CreateMessageSession(ctx, runnerScaleSetId, hostName)
if err == nil {
break
}
clientSideError := &actions.HttpClientSideError{}
if errors.As(err, &clientSideError) && clientSideError.Code != http.StatusConflict {
logger.Info("unable to create message session. The error indicates something is wrong on the client side, won't make any retry.")
return nil, nil, fmt.Errorf("create message session http request failed. %w", err)
}
retryCount++
if retryCount >= sessionCreationMaxRetryCount {
return nil, nil, fmt.Errorf("create message session failed since it exceed %d retry limit. %w", sessionCreationMaxRetryCount, err)
}
logger.Info("unable to create message session. Will try again in 30 seconds", "error", err.Error())
if ok := ctx.Value(testIgnoreSleep); ok == nil {
time.Sleep(getRandomDuration(30, 45))
}
}
statistics, _ := json.Marshal(runnerScaleSetSession.Statistics)
logger.Info("current runner scale set statistics.", "statistics", string(statistics))
if runnerScaleSetSession.Statistics.TotalAvailableJobs > 0 || runnerScaleSetSession.Statistics.TotalAssignedJobs > 0 {
acquirableJobs, err := client.GetAcquirableJobs(ctx, runnerScaleSetId)
if err != nil {
return nil, nil, fmt.Errorf("get acquirable jobs failed. %w", err)
}
acquirableJobsJson, err := json.Marshal(acquirableJobs.Jobs)
if err != nil {
return nil, nil, fmt.Errorf("marshal acquirable jobs failed. %w", err)
}
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
Statistics: runnerScaleSetSession.Statistics,
Body: string(acquirableJobsJson),
}
return runnerScaleSetSession, initialMessage, nil
}
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
Statistics: runnerScaleSetSession.Statistics,
Body: "",
}
return runnerScaleSetSession, initialMessage, nil
}
func (m *AutoScalerClient) Close() error {
m.logger.Info("closing.")
return m.client.Close()
}
func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
if m.initialMessage != nil {
err := handler(m.initialMessage)
if err != nil {
return fmt.Errorf("fail to process initial message. %w", err)
}
m.initialMessage = nil
return nil
}
for {
message, err := m.client.GetMessage(ctx, m.lastMessageId, maxCapacity)
if err != nil {
return fmt.Errorf("get message failed from refreshing client. %w", err)
}
if message == nil {
continue
}
err = handler(message)
if err != nil {
return fmt.Errorf("handle message failed. %w", err)
}
m.lastMessageId = message.MessageId
return m.deleteMessage(ctx, message.MessageId)
}
}
func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
err := m.client.DeleteMessage(ctx, messageId)
if err != nil {
return fmt.Errorf("delete message failed from refreshing client. %w", err)
}
m.logger.Info("deleted message.", "messageId", messageId)
return nil
}
func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
if len(requestIds) == 0 {
return nil
}
ids, err := m.client.AcquireJobs(ctx, requestIds)
if err != nil {
return fmt.Errorf("acquire jobs failed from refreshing client. %w", err)
}
m.logger.Info("acquired jobs.", "requested", len(requestIds), "acquired", len(ids))
return nil
}
func getRandomDuration(minSeconds, maxSeconds int) time.Duration {
return time.Duration(rand.Intn(maxSeconds-minSeconds)+minSeconds) * time.Second
}

View File

@ -1,735 +0,0 @@
package main
import (
"context"
"fmt"
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestCreateSession(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_CreateInitMessage(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAssignedJobs: 5,
},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
Count: 1,
Jobs: []actions.AcquirableJob{
{
RunnerRequestId: 1,
OwnerName: "owner",
RepositoryName: "repo",
AcquireJobUrl: "https://github.com",
},
},
}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0")
assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages")
assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5")
assert.Equal(t, 1, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 1")
assert.Equal(t, "[{\"acquireJobUrl\":\"https://github.com\",\"messageType\":\"\",\"runnerRequestId\":1,\"repositoryName\":\"repo\",\"ownerName\":\"owner\",\"jobWorkflowRef\":\"\",\"eventName\":\"\",\"requestLabels\":null}]", asClient.initialMessage.Body, "Initial message body is not correct")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_CreateInitMessageWithOnlyAssignedJobs(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{
TotalAssignedJobs: 5,
},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
Count: 0,
Jobs: []actions.AcquirableJob{},
}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0")
assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages")
assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5")
assert.Equal(t, 0, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 0")
assert.Equal(t, "[]", asClient.initialMessage.Body, "Initial message body is not correct")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_CreateInitMessageFailed(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAssignedJobs: 5,
},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(nil, fmt.Errorf("error"))
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
assert.ErrorContains(t, err, "get acquirable jobs failed. error", "Unexpected error")
assert.Nil(t, asClient, "Client should be nil")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_RetrySessionConflict(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
Code: 409,
}).Once()
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil).Once()
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_RetrySessionConflict_RunOutOfRetry(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
Code: 409,
})
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
assert.Error(t, err, "Error should be returned")
assert.Nil(t, asClient, "AutoScaler should be nil")
assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", sessionCreationMaxRetryCount), "CreateMessageSession should be called 10 times")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestCreateSession_NotRetryOnGeneralException(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
Code: 403,
})
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
assert.Error(t, err, "Error should be returned")
assert.Nil(t, asClient, "AutoScaler should be nil")
assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", 1), "CreateMessageSession should be called 1 time and not retry on generic error")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestDeleteSession(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("Close").Return(nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.Close()
assert.NoError(t, err, "Error deleting session")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestDeleteSession_Failed(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("Close").Return(fmt.Errorf("error"))
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.Close()
assert.Error(t, err, "Error should be returned")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
}, nil)
mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
// read initial message
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting message")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return fmt.Errorf("error")
}, 10)
assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAssignedJobs: 2,
},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything, mock.Anything).Return(session, nil)
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
Count: 1,
Jobs: []actions.AcquirableJob{
{
RunnerRequestId: 1,
OwnerName: "owner",
RepositoryName: "repo",
AcquireJobUrl: "https://github.com",
},
},
}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
require.NotNil(t, asClient.initialMessage, "Initial message should be set")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting message")
assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
TotalAssignedJobs: 2,
},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
Count: 1,
Jobs: []actions.AcquirableJob{
{
RunnerRequestId: 1,
OwnerName: "owner",
RepositoryName: "repo",
AcquireJobUrl: "https://github.com",
},
},
}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
require.NoError(t, err, "Error creating autoscaler client")
require.NotNil(t, asClient.initialMessage, "Initial message should be set")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return fmt.Errorf("error")
}, 10)
assert.ErrorContains(t, err, "fail to process initial message. error", "Error getting message")
assert.NotNil(t, asClient.initialMessage, "Initial message should still be set")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, nil).Times(3)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
}, nil).Once()
mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting initial message")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, fmt.Errorf("error"))
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
// process initial message
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
return nil
}, 10)
assert.NoError(t, err, "Error getting initial message")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
return fmt.Errorf("Should not be called")
}, 10)
assert.ErrorContains(t, err, "get message failed from refreshing client. error", "Error should be returned")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
}, nil)
mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(fmt.Errorf("error"))
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.NoError(t, err, "Error getting initial message")
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
}, 10)
assert.ErrorContains(t, err, "delete message failed from refreshing client. error", "Error getting message")
assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
func TestAcquireJobsForRunnerScaleSet(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("AcquireJobs", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1, 2, 3}, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3})
assert.NoError(t, err, "Error acquiring jobs")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestAcquireJobsForRunnerScaleSet_SkipEmptyList(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{})
assert.NoError(t, err, "Error acquiring jobs")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
func TestAcquireJobsForRunnerScaleSet_Failed(t *testing.T) {
mockActionsClient := &actions.MockActionsService{}
mockSessionClient := &actions.MockSessionService{}
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, err, "Error creating logger")
ctx := context.Background()
sessionId := uuid.New()
session := &actions.RunnerScaleSetSession{
SessionId: &sessionId,
OwnerName: "owner",
MessageQueueUrl: "https://github.com",
MessageQueueAccessToken: "token",
RunnerScaleSet: &actions.RunnerScaleSet{
Id: 1,
},
Statistics: &actions.RunnerScaleSetStatistic{},
}
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
mockSessionClient.On("AcquireJobs", ctx, mock.Anything).Return(nil, fmt.Errorf("error"))
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
asc.client = mockSessionClient
})
require.NoError(t, err, "Error creating autoscaler client")
err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3})
assert.ErrorContains(t, err, "acquire jobs failed from refreshing client. error", "Expect error acquiring jobs")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

View File

@ -1,246 +0,0 @@
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
)
type ScaleSettings struct {
Namespace string
ResourceName string
MinRunners int
MaxRunners int
}
type Service struct {
ctx context.Context
logger logr.Logger
rsClient RunnerScaleSetClient
kubeManager KubernetesManager
settings *ScaleSettings
currentRunnerCount int
metricsExporter metricsExporter
errs []error
}
func WithPrometheusMetrics(conf config.Config) func(*Service) {
return func(svc *Service) {
parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
if err != nil {
svc.errs = append(svc.errs, err)
}
svc.metricsExporter.withBaseLabels(baseLabels{
scaleSetName: conf.EphemeralRunnerSetName,
scaleSetNamespace: conf.EphemeralRunnerSetNamespace,
enterprise: parsedURL.Enterprise,
organization: parsedURL.Organization,
repository: parsedURL.Repository,
})
}
}
func WithLogger(logger logr.Logger) func(*Service) {
return func(s *Service) {
s.logger = logger.WithName("service")
}
}
func NewService(
ctx context.Context,
rsClient RunnerScaleSetClient,
manager KubernetesManager,
settings *ScaleSettings,
options ...func(*Service),
) (*Service, error) {
s := &Service{
ctx: ctx,
rsClient: rsClient,
kubeManager: manager,
settings: settings,
currentRunnerCount: -1, // force patch on startup
logger: logr.FromContextOrDiscard(ctx),
}
for _, option := range options {
option(s)
}
if len(s.errs) > 0 {
return nil, errors.Join(s.errs...)
}
return s, nil
}
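// Editorial sketch, not part of the original file: NewService wires its
// dependencies through functional options. The helper name exampleNewService
// and its parameters are assumptions of this example; rsClient and
// kubeManager stand for any RunnerScaleSetClient / KubernetesManager
// implementations.
func exampleNewService(ctx context.Context, rsClient RunnerScaleSetClient, kubeManager KubernetesManager, logger logr.Logger) error {
	svc, err := NewService(ctx, rsClient, kubeManager,
		&ScaleSettings{Namespace: "arc-runners", ResourceName: "my-set", MinRunners: 0, MaxRunners: 5},
		WithLogger(logger),
	)
	if err != nil {
		return err // option errors are accumulated and joined by NewService
	}
	// currentRunnerCount starts at -1, so the first scale decision always patches.
	return svc.Start() // blocks until ctx is cancelled or a message error occurs
}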
func (s *Service) Start() error {
s.metricsExporter.publishStatic(s.settings.MaxRunners, s.settings.MinRunners)
for {
s.logger.Info("waiting for message...")
select {
case <-s.ctx.Done():
s.logger.Info("service is stopped.")
return nil
default:
err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage, s.settings.MaxRunners)
if err != nil {
return fmt.Errorf("could not get and process message. %w", err)
}
}
}
}
func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
s.logger.Info("process message.", "messageId", message.MessageId, "messageType", message.MessageType)
if message.Statistics == nil {
return fmt.Errorf("can't process message with empty statistics")
}
s.logger.Info("current runner scale set statistics.",
"available jobs", message.Statistics.TotalAvailableJobs,
"acquired jobs", message.Statistics.TotalAcquiredJobs,
"assigned jobs", message.Statistics.TotalAssignedJobs,
"running jobs", message.Statistics.TotalRunningJobs,
"registered runners", message.Statistics.TotalRegisteredRunners,
"busy runners", message.Statistics.TotalBusyRunners,
"idle runners", message.Statistics.TotalIdleRunners)
s.metricsExporter.publishStatistics(message.Statistics)
if message.MessageType != "RunnerScaleSetJobMessages" {
s.logger.Info("skip message with unknown message type.", "messageType", message.MessageType)
return nil
}
if message.MessageId == 0 && message.Body == "" { // initial message with statistics only
return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
}
var batchedMessages []json.RawMessage
if err := json.NewDecoder(strings.NewReader(message.Body)).Decode(&batchedMessages); err != nil {
return fmt.Errorf("could not decode job messages. %w", err)
}
s.logger.Info("process batched runner scale set job messages.", "messageId", message.MessageId, "batchSize", len(batchedMessages))
var availableJobs []int64
for _, message := range batchedMessages {
var messageType actions.JobMessageType
if err := json.Unmarshal(message, &messageType); err != nil {
return fmt.Errorf("could not decode job message type. %w", err)
}
switch messageType.MessageType {
case "JobAvailable":
var jobAvailable actions.JobAvailable
if err := json.Unmarshal(message, &jobAvailable); err != nil {
return fmt.Errorf("could not decode job available message. %w", err)
}
s.logger.Info(
"job available message received.",
"RequestId",
jobAvailable.RunnerRequestId,
)
availableJobs = append(availableJobs, jobAvailable.RunnerRequestId)
case "JobAssigned":
var jobAssigned actions.JobAssigned
if err := json.Unmarshal(message, &jobAssigned); err != nil {
return fmt.Errorf("could not decode job assigned message. %w", err)
}
s.logger.Info(
"job assigned message received.",
"RequestId",
jobAssigned.RunnerRequestId,
)
// s.metricsExporter.publishJobAssigned(&jobAssigned)
case "JobStarted":
var jobStarted actions.JobStarted
if err := json.Unmarshal(message, &jobStarted); err != nil {
return fmt.Errorf("could not decode job started message. %w", err)
}
s.logger.Info(
"job started message received.",
"RequestId",
jobStarted.RunnerRequestId,
"RunnerId",
jobStarted.RunnerId,
)
s.metricsExporter.publishJobStarted(&jobStarted)
s.updateJobInfoForRunner(jobStarted)
case "JobCompleted":
var jobCompleted actions.JobCompleted
if err := json.Unmarshal(message, &jobCompleted); err != nil {
return fmt.Errorf("could not decode job completed message. %w", err)
}
s.logger.Info(
"job completed message received.",
"RequestId",
jobCompleted.RunnerRequestId,
"Result",
jobCompleted.Result,
"RunnerId",
jobCompleted.RunnerId,
"RunnerName",
jobCompleted.RunnerName,
)
s.metricsExporter.publishJobCompleted(&jobCompleted)
default:
s.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
}
}
err := s.rsClient.AcquireJobsForRunnerScaleSet(s.ctx, availableJobs)
if err != nil {
return fmt.Errorf("could not acquire jobs. %w", err)
}
return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
}
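// Editorial sketch, not part of the original file: the message body handled
// above is a JSON array of typed job messages, decoded in two passes (type
// tag first, then the concrete shape). The helper name is hypothetical.
func exampleDecodeJobAvailable() (int64, error) {
	body := `[{"messageType":"JobAvailable","runnerRequestId":3}]`
	var batched []json.RawMessage
	if err := json.Unmarshal([]byte(body), &batched); err != nil {
		return 0, err
	}
	var job actions.JobAvailable
	if err := json.Unmarshal(batched[0], &job); err != nil {
		return 0, err
	}
	return job.RunnerRequestId, nil // 3
}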
func (s *Service) scaleForAssignedJobCount(count int) error {
// Max runners should always be set by the resource builder either to the configured value,
// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
s.metricsExporter.publishDesiredRunners(targetRunnerCount)
if targetRunnerCount != s.currentRunnerCount {
s.logger.Info("try scale runner request up/down base on assigned job count",
"assigned job", count,
"decision", targetRunnerCount,
"min", s.settings.MinRunners,
"max", s.settings.MaxRunners,
"currentRunnerCount", s.currentRunnerCount,
)
err := s.kubeManager.ScaleEphemeralRunnerSet(s.ctx, s.settings.Namespace, s.settings.ResourceName, targetRunnerCount)
if err != nil {
return fmt.Errorf("could not scale ephemeral runner set (%s/%s). %w", s.settings.Namespace, s.settings.ResourceName, err)
}
s.currentRunnerCount = targetRunnerCount
}
return nil
}
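// Editorial sketch, not part of the original file: the clamp above in
// isolation (targetRunners is a hypothetical helper). With MinRunners=1 and
// MaxRunners=5: 0 assigned jobs -> 1, 3 -> 4, 10 -> 5 (capped at MaxRunners).
func targetRunners(minRunners, maxRunners, assigned int) int {
	return min(minRunners+assigned, maxRunners)
}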
// updateJobInfoForRunner updates the ephemeral runner with the job info. This is best effort, since the info is only used for better telemetry.
func (s *Service) updateJobInfoForRunner(jobInfo actions.JobStarted) {
s.logger.Info("update job info for runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
"repoName", jobInfo.RepositoryName,
"workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId,
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId,
)
err := s.kubeManager.UpdateEphemeralRunnerWithJobInfo(s.ctx, s.settings.Namespace, jobInfo.RunnerName, jobInfo.OwnerName, jobInfo.RepositoryName, jobInfo.JobWorkflowRef, jobInfo.JobDisplayName, jobInfo.WorkflowRunId, jobInfo.RunnerRequestId)
if err != nil {
s.logger.Error(err, "could not update ephemeral runner with job info", "runnerName", jobInfo.RunnerName, "requestId", jobInfo.RunnerRequestId)
}
}

View File

@ -1,684 +0,0 @@
package main
import (
"context"
"fmt"
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestNewService(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
assert.Equal(t, logger, service.logger)
}
func TestStart(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(mock.Arguments) { cancel() }).Return(nil).Once()
err = service.Start()
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestStart_ScaleToMinRunners(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 5,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
_ = service.scaleForAssignedJobCount(5)
}).Return(nil)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
err = service.Start()
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 5,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
_ = service.scaleForAssignedJobCount(5)
}).Return(c.ReturnArguments.Get(0))
err = service.Start()
assert.ErrorContains(t, err, "could not get and process message", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestStart_GetMultipleMessages(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(5)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
err = service.Start()
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestStart_ErrorOnMessage(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(2)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()
err = service.Start()
assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_NoStatistic(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
})
assert.ErrorContains(t, err, "can't process message with empty statistics", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "unknown",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
},
Body: "[]",
})
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
},
Body: "invalid json",
})
assert.ErrorContains(t, err, "could not decode job messages", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAvailableJobs: 1,
},
Body: "[\"something\", \"test\"]",
})
assert.ErrorContains(t, err, "could not decode job message type", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_MultipleMessages(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 1,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAssignedJobs: 2,
TotalAvailableJobs: 2,
},
Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 3},{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 4},{\"messageType\":\"JobAssigned\", \"runnerRequestId\": 2}, {\"messageType\":\"JobCompleted\", \"runnerRequestId\": 1, \"result\":\"succeed\"},{\"messageType\":\"unknown\"}]",
})
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_AcquireJobsFailed(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once()
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAssignedJobs: 1,
TotalAvailableJobs: 1,
},
Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 1}]",
})
assert.ErrorContains(t, err, "could not acquire jobs. error", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 0,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
err = service.scaleForAssignedJobCount(2)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(2)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(2)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(2)
assert.NoError(t, err, "Unexpected error")
assert.Equal(t, 2, service.currentRunnerCount, "Unexpected runner count")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 1,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
err = service.scaleForAssignedJobCount(0)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(3)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(5)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(1)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(10)
assert.NoError(t, err, "Unexpected error")
assert.Equal(t, 5, service.currentRunnerCount, "Unexpected runner count")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 1,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))
err = service.scaleForAssignedJobCount(2)
assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_JobStartedMessage(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 1,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
service.currentRunnerCount = 1
mockKubeManager.On(
"UpdateEphemeralRunnerWithJobInfo",
ctx,
service.settings.Namespace,
"runner1",
"owner1",
"repo1",
".github/workflows/ci.yaml",
"job1",
int64(100),
int64(3),
).Run(
func(_ mock.Arguments) { cancel() },
).Return(nil).Once()
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAssignedJobs: 1,
TotalAvailableJobs: 0,
},
Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]",
})
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}
mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
&ScaleSettings{
Namespace: "namespace",
ResourceName: "resource",
MinRunners: 1,
MaxRunners: 5,
},
func(s *Service) {
s.logger = logger
},
)
require.NoError(t, err)
service.currentRunnerCount = 1
mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once()
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
TotalAssignedJobs: 0,
TotalAvailableJobs: 0,
},
Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]",
})
assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

View File

@ -1,76 +0,0 @@
package config
import (
"encoding/json"
"fmt"
"os"
)
type Config struct {
ConfigureUrl string `json:"configureUrl"`
AppID int64 `json:"appID"`
AppInstallationID int64 `json:"appInstallationID"`
AppPrivateKey string `json:"appPrivateKey"`
Token string `json:"token"`
EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
EphemeralRunnerSetName string `json:"ephemeralRunnerSetName"`
MaxRunners int `json:"maxRunners"`
MinRunners int `json:"minRunners"`
RunnerScaleSetId int `json:"runnerScaleSetId"`
RunnerScaleSetName string `json:"runnerScaleSetName"`
ServerRootCA string `json:"serverRootCA"`
LogLevel string `json:"logLevel"`
LogFormat string `json:"logFormat"`
MetricsAddr string `json:"metricsAddr"`
MetricsEndpoint string `json:"metricsEndpoint"`
}
func Read(path string) (Config, error) {
f, err := os.Open(path)
if err != nil {
return Config{}, err
}
defer f.Close()
var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
}
if err := config.validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
}
return config, nil
}
func (c *Config) validate() error {
if len(c.ConfigureUrl) == 0 {
return fmt.Errorf("GitHubConfigUrl is not provided")
}
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
}
if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
}
hasToken := len(c.Token) > 0
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
}
return nil
}
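// Editorial sketch, not part of the original file: Read opens the JSON file,
// decodes it into Config, and validates it in one call. The helper name and
// the path are hypothetical.
func exampleReadConfig() {
	cfg, err := Read("/etc/listener/config.json") // hypothetical path
	if err != nil {
		fmt.Printf("config error: %v\n", err) // e.g. "failed to validate config: ..."
		return
	}
	fmt.Println(cfg.EphemeralRunnerSetName, cfg.MaxRunners)
}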

View File

@ -1,92 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

View File

@ -1,12 +0,0 @@
package main
import (
"context"
)
//go:generate mockery --inpackage --name=KubernetesManager
type KubernetesManager interface {
ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error
UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, jobRequestId, workflowRunId int64) error
}
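// Editorial sketch, not part of the original file: the interface is small
// enough that a hand-written no-op fake (the name fakeKubernetesManager is
// hypothetical) satisfies it alongside the generated mockery mock.
type fakeKubernetesManager struct{}

func (fakeKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
	return nil
}

func (fakeKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, jobRequestId, workflowRunId int64) error {
	return nil
}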

Some files were not shown because too many files have changed in this diff.