commit 9bd284fb7b
Merge branch 'master' into master
@@ -18,7 +18,7 @@ on:
   workflow_dispatch:
 env:
   KUBE_SCORE_VERSION: 1.16.1
-  HELM_VERSION: v3.8.0
+  HELM_VERSION: v3.17.0
 
 permissions:
   contents: read
@@ -46,22 +46,6 @@ jobs:
         with:
           version: ${{ env.HELM_VERSION }}
 
-      - name: Set up kube-score
-        run: |
-          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
-          chmod 755 kube-score
-
-      - name: Kube-score generated manifests
-        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-          --ignore-test pod-networkpolicy
-          --ignore-test deployment-has-poddisruptionbudget
-          --ignore-test deployment-has-host-podantiaffinity
-          --ignore-test container-security-context
-          --ignore-test pod-probes
-          --ignore-test container-image-tag
-          --enable-optional-test container-security-context-privileged
-          --enable-optional-test container-security-context-readonlyrootfilesystem
-
       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
       - uses: actions/setup-python@v5
         with:
@@ -37,7 +37,6 @@ RUN --mount=target=. \
     --mount=type=cache,mode=0777,target=${GOCACHE} \
     export GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOARM=${TARGETVARIANT#v} && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/manager main.go && \
-    go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener && \
     go build -trimpath -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=${VERSION}' -X 'github.com/actions/actions-runner-controller/build.CommitSHA=${COMMIT_SHA}'" -o /out/ghalistener ./cmd/ghalistener && \
     go build -trimpath -ldflags="-s -w" -o /out/github-webhook-server ./cmd/githubwebhookserver && \
     go build -trimpath -ldflags="-s -w" -o /out/actions-metrics-server ./cmd/actionsmetricsserver && \
@@ -52,7 +51,6 @@ WORKDIR /
 COPY --from=builder /out/manager .
 COPY --from=builder /out/github-webhook-server .
 COPY --from=builder /out/actions-metrics-server .
-COPY --from=builder /out/github-runnerscaleset-listener .
 COPY --from=builder /out/ghalistener .
 COPY --from=builder /out/sleep .
 
Makefile

@@ -87,7 +87,7 @@ test-with-deps: kube-apiserver etcd kubectl
 # Build manager binary
 manager: generate fmt vet
 	go build -o bin/manager main.go
-	go build -o bin/github-runnerscaleset-listener ./cmd/githubrunnerscalesetlistener
+	go build -o bin/github-runnerscaleset-listener ./cmd/ghalistener
 
 # Run against the configured Kubernetes cluster in ~/.kube/config
 run: generate fmt vet manifests
@@ -1,5 +1,3 @@
 Thank you for installing {{ .Chart.Name }}.
 
 Your release is named {{ .Release.Name }}.
-
-WARNING: Older version of the listener (githubrunnerscalesetlistener) is deprecated and will be removed in the future gha-runner-scale-set-0.10.0 release. If you are using environment variable override to force the old listener, please remove the environment variable and use the new listener (ghalistener) instead.
@@ -1,3 +1,4 @@
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.autoscalingRunnerSet) }}
 apiVersion: actions.github.com/v1alpha1
 kind: AutoscalingRunnerSet
 metadata:
@@ -10,9 +11,25 @@ metadata:
   name: {{ include "gha-runner-scale-set.scale-set-name" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
   labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.autoscalingRunnerSet.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     app.kubernetes.io/component: "autoscaling-runner-set"
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.autoscalingRunnerSet.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
   {{- $containerMode := .Values.containerMode }}
   {{- if not (kindIs "string" .Values.githubConfigSecret) }}
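A note on the actions.github.com/values-hash annotation in the hunk above: `toJson .Values | sha256sum` renders 64 hex characters, and `trunc 63` drops the last one, which is exactly the bound the chart tests below assert (assert.LessOrEqual(t, len(secondHash), 63)). A minimal, self-contained Go sketch of the same computation; the sample values map is made up for illustration, and key ordering may differ from Sprig's toJson, so only the length bound is the point:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
)

func main() {
    // Stand-in for .Values; any JSON-serializable map illustrates the point.
    values := map[string]any{
        "githubConfigUrl": "https://github.com/actions",
        "maxRunners":      5,
    }

    raw, _ := json.Marshal(values)     // toJson .Values
    sum := sha256.Sum256(raw)          // sha256sum
    hash := hex.EncodeToString(sum[:]) // 64 hex characters

    if len(hash) > 63 { // trunc 63
        hash = hash[:63]
    }
    fmt.Println(hash, len(hash)) // the length is always 63 here
}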
@@ -1,11 +1,29 @@
 {{- if not (kindIs "string" .Values.githubConfigSecret) }}
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.githubConfigSecret) }}
 apiVersion: v1
 kind: Secret
 metadata:
   name: {{ include "gha-runner-scale-set.githubsecret" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
   labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.githubConfigSecret.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.githubConfigSecret.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 data:
@@ -1,12 +1,28 @@
 {{- $containerMode := .Values.containerMode }}
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
 {{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 # default permission for runner pod service account in kubernetes mode (container hook)
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
+  labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeRole.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeRole.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 rules:
 - apiGroups: [""]
@@ -1,10 +1,31 @@
 {{- $containerMode := .Values.containerMode }}
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
 {{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
+  labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeRoleBinding.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeRoleBinding.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 roleRef:
@@ -1,18 +1,32 @@
 {{- $containerMode := .Values.containerMode }}
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
 {{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
-  {{- if .Values.containerMode.kubernetesModeServiceAccount }}
-  {{- with .Values.containerMode.kubernetesModeServiceAccount.annotations }}
+  {{- if or .Values.annotations $hasCustomResourceMeta }}
   annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeServiceAccount.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   {{- end }}
+  labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.kubernetesModeServiceAccount.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   finalizers:
     - actions.github.com/cleanup-protection
-  labels:
-    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
 {{- end }}
@@ -1,11 +1,29 @@
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.managerRole) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.managerRoleName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
   labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.managerRole.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
     app.kubernetes.io/component: manager-role
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.managerRole.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 rules:
@@ -1,11 +1,29 @@
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.managerRoleBinding) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
   labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.managerRoleBinding.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
     app.kubernetes.io/component: manager-role-binding
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.managerRoleBinding.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 roleRef:
@@ -1,3 +1,4 @@
+{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.noPermissionServiceAccount) }}
 {{- $containerMode := .Values.containerMode }}
 {{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
 apiVersion: v1
@@ -6,7 +7,24 @@ metadata:
   name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
   namespace: {{ include "gha-runner-scale-set.namespace" . }}
   labels:
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.noPermissionServiceAccount.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  annotations:
+    {{- with .Values.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- if $hasCustomResourceMeta }}
+    {{- with .Values.resourceMeta.noPermissionServiceAccount.annotations }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+    {{- end }}
   finalizers:
     - actions.github.com/cleanup-protection
 {{- end }}
@@ -743,37 +743,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
     assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
 }
 
-func TestTemplateRenderedKubernetesModeServiceAccountAnnotations(t *testing.T) {
-    t.Parallel()
-
-    // Path to the helm chart we will test
-    helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
-    require.NoError(t, err)
-
-    testValuesPath, err := filepath.Abs("../tests/values_kubernetes_mode_service_account_annotations.yaml")
-    require.NoError(t, err)
-
-    releaseName := "test-runners"
-    namespaceName := "test-" + strings.ToLower(random.UniqueId())
-
-    options := &helm.Options{
-        Logger: logger.Discard,
-        SetValues: map[string]string{
-            "controllerServiceAccount.name":      "arc",
-            "controllerServiceAccount.namespace": "arc-system",
-        },
-        ValuesFiles:    []string{testValuesPath},
-        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
-    }
-
-    output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
-
-    var sa corev1.ServiceAccount
-    helm.UnmarshalK8SYaml(t, output, &sa)
-
-    assert.Equal(t, "arn:aws:iam::123456789012:role/sample-role", sa.Annotations["eks.amazonaws.com/role-arn"], "Annotations should be arn:aws:iam::123456789012:role/sample-role")
-}
-
 func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
     t.Parallel()
 
@@ -2145,6 +2114,209 @@ func TestAutoscalingRunnerSetAnnotationValuesHash(t *testing.T) {
     assert.LessOrEqual(t, len(secondHash), 63)
 }
 
+func TestCustomLabels(t *testing.T) {
+    t.Parallel()
+
+    // Path to the helm chart we will test
+    helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+    require.NoError(t, err)
+
+    releaseName := "test-runners"
+    namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+    options := &helm.Options{
+        Logger: logger.Discard,
+        SetValues: map[string]string{
+            "githubConfigUrl":                    "https://github.com/actions",
+            "githubConfigSecret.github_token":    "gh_token12345",
+            "controllerServiceAccount.name":      "arc",
+            "containerMode.type":                 "kubernetes",
+            "controllerServiceAccount.namespace": "arc-system",
+            `labels.argocd\.argoproj\.io/sync-wave`: `"1"`,
+            `labels.app\.kubernetes\.io/part-of`:    "no-override", // this shouldn't be overwritten
+            "resourceMeta.autoscalingRunnerSet.labels.ars-custom":          "ars-custom-value",
+            "resourceMeta.githubConfigSecret.labels.gh-custom":             "gh-custom-value",
+            "resourceMeta.kubernetesModeRole.labels.kmr-custom":            "kmr-custom-value",
+            "resourceMeta.kubernetesModeRoleBinding.labels.kmrb-custom":    "kmrb-custom-value",
+            "resourceMeta.kubernetesModeServiceAccount.labels.kmsa-custom": "kmsa-custom-value",
+            "resourceMeta.managerRole.labels.mr-custom":                    "mr-custom-value",
+            "resourceMeta.managerRoleBinding.labels.mrb-custom":            "mrb-custom-value",
+        },
+        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+    }
+
+    output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
+
+    const targetLabel = "argocd.argoproj.io/sync-wave"
+    const wantCustomValue = `"1"`
+    const reservedLabel = "app.kubernetes.io/part-of"
+    const wantReservedValue = "gha-rs"
+
+    var githubSecret corev1.Secret
+    helm.UnmarshalK8SYaml(t, output, &githubSecret)
+    assert.Equal(t, wantCustomValue, githubSecret.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, githubSecret.Labels[reservedLabel])
+    assert.Equal(t, "gh-custom-value", githubSecret.Labels["gh-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
+    var role rbacv1.Role
+    helm.UnmarshalK8SYaml(t, output, &role)
+    assert.Equal(t, wantCustomValue, role.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, role.Labels[reservedLabel])
+    assert.Equal(t, "kmr-custom-value", role.Labels["kmr-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
+    var roleBinding rbacv1.RoleBinding
+    helm.UnmarshalK8SYaml(t, output, &roleBinding)
+    assert.Equal(t, wantCustomValue, roleBinding.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, roleBinding.Labels[reservedLabel])
+    assert.Equal(t, "kmrb-custom-value", roleBinding.Labels["kmrb-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+    var ars v1alpha1.AutoscalingRunnerSet
+    helm.UnmarshalK8SYaml(t, output, &ars)
+    assert.Equal(t, wantCustomValue, ars.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, ars.Labels[reservedLabel])
+    assert.Equal(t, "ars-custom-value", ars.Labels["ars-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
+    var serviceAccount corev1.ServiceAccount
+    helm.UnmarshalK8SYaml(t, output, &serviceAccount)
+    assert.Equal(t, wantCustomValue, serviceAccount.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, serviceAccount.Labels[reservedLabel])
+    assert.Equal(t, "kmsa-custom-value", serviceAccount.Labels["kmsa-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
+    var managerRole rbacv1.Role
+    helm.UnmarshalK8SYaml(t, output, &managerRole)
+    assert.Equal(t, wantCustomValue, managerRole.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, managerRole.Labels[reservedLabel])
+    assert.Equal(t, "mr-custom-value", managerRole.Labels["mr-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
+    var managerRoleBinding rbacv1.RoleBinding
+    helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
+    assert.Equal(t, wantCustomValue, managerRoleBinding.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, managerRoleBinding.Labels[reservedLabel])
+    assert.Equal(t, "mrb-custom-value", managerRoleBinding.Labels["mrb-custom"])
+
+    options = &helm.Options{
+        Logger: logger.Discard,
+        SetValues: map[string]string{
+            "githubConfigUrl":                    "https://github.com/actions",
+            "githubConfigSecret.github_token":    "gh_token12345",
+            "controllerServiceAccount.name":      "arc",
+            "controllerServiceAccount.namespace": "arc-system",
+            `labels.argocd\.argoproj\.io/sync-wave`: `"1"`,
+            "resourceMeta.noPermissionServiceAccount.labels.npsa-custom": "npsa-custom-value",
+        },
+        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+    }
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
+    var noPermissionServiceAccount corev1.ServiceAccount
+    helm.UnmarshalK8SYaml(t, output, &noPermissionServiceAccount)
+    assert.Equal(t, wantCustomValue, noPermissionServiceAccount.Labels[targetLabel])
+    assert.Equal(t, wantReservedValue, noPermissionServiceAccount.Labels[reservedLabel])
+    assert.Equal(t, "npsa-custom-value", noPermissionServiceAccount.Labels["npsa-custom"])
+}
+
+func TestCustomAnnotations(t *testing.T) {
+    t.Parallel()
+
+    // Path to the helm chart we will test
+    helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+    require.NoError(t, err)
+
+    releaseName := "test-runners"
+    namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+    options := &helm.Options{
+        Logger: logger.Discard,
+        SetValues: map[string]string{
+            "githubConfigUrl":                    "https://github.com/actions",
+            "githubConfigSecret.github_token":    "gh_token12345",
+            "containerMode.type":                 "kubernetes",
+            "controllerServiceAccount.name":      "arc",
+            "controllerServiceAccount.namespace": "arc-system",
+            `annotations.argocd\.argoproj\.io/sync-wave`: `"1"`,
+            "resourceMeta.autoscalingRunnerSet.annotations.ars-custom":          "ars-custom-value",
+            "resourceMeta.githubConfigSecret.annotations.gh-custom":             "gh-custom-value",
+            "resourceMeta.kubernetesModeRole.annotations.kmr-custom":            "kmr-custom-value",
+            "resourceMeta.kubernetesModeRoleBinding.annotations.kmrb-custom":    "kmrb-custom-value",
+            "resourceMeta.kubernetesModeServiceAccount.annotations.kmsa-custom": "kmsa-custom-value",
+            "resourceMeta.managerRole.annotations.mr-custom":                    "mr-custom-value",
+            "resourceMeta.managerRoleBinding.annotations.mrb-custom":            "mrb-custom-value",
+        },
+        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+    }
+
+    const targetAnnotations = "argocd.argoproj.io/sync-wave"
+    const wantCustomValue = `"1"`
+
+    output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
+
+    var githubSecret corev1.Secret
+    helm.UnmarshalK8SYaml(t, output, &githubSecret)
+    assert.Equal(t, wantCustomValue, githubSecret.Annotations[targetAnnotations])
+    assert.Equal(t, "gh-custom-value", githubSecret.Annotations["gh-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
+    var role rbacv1.Role
+    helm.UnmarshalK8SYaml(t, output, &role)
+    assert.Equal(t, wantCustomValue, role.Annotations[targetAnnotations])
+    assert.Equal(t, "kmr-custom-value", role.Annotations["kmr-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
+    var roleBinding rbacv1.RoleBinding
+    helm.UnmarshalK8SYaml(t, output, &roleBinding)
+    assert.Equal(t, wantCustomValue, roleBinding.Annotations[targetAnnotations])
+    assert.Equal(t, "kmrb-custom-value", roleBinding.Annotations["kmrb-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+    var ars v1alpha1.AutoscalingRunnerSet
+    helm.UnmarshalK8SYaml(t, output, &ars)
+    assert.Equal(t, wantCustomValue, ars.Annotations[targetAnnotations])
+    assert.Equal(t, "ars-custom-value", ars.Annotations["ars-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
+    var serviceAccount corev1.ServiceAccount
+    helm.UnmarshalK8SYaml(t, output, &serviceAccount)
+    assert.Equal(t, wantCustomValue, serviceAccount.Annotations[targetAnnotations])
+    assert.Equal(t, "kmsa-custom-value", serviceAccount.Annotations["kmsa-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
+    var managerRole rbacv1.Role
+    helm.UnmarshalK8SYaml(t, output, &managerRole)
+    assert.Equal(t, wantCustomValue, managerRole.Annotations[targetAnnotations])
+    assert.Equal(t, "mr-custom-value", managerRole.Annotations["mr-custom"])
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
+    var managerRoleBinding rbacv1.RoleBinding
+    helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
+    assert.Equal(t, wantCustomValue, managerRoleBinding.Annotations[targetAnnotations])
+    assert.Equal(t, "mrb-custom-value", managerRoleBinding.Annotations["mrb-custom"])
+
+    options = &helm.Options{
+        Logger: logger.Discard,
+        SetValues: map[string]string{
+            "githubConfigUrl":                    "https://github.com/actions",
+            "githubConfigSecret.github_token":    "gh_token12345",
+            "controllerServiceAccount.name":      "arc",
+            "controllerServiceAccount.namespace": "arc-system",
+            `annotations.argocd\.argoproj\.io/sync-wave`: `"1"`,
+            "resourceMeta.noPermissionServiceAccount.annotations.npsa-custom": "npsa-custom-value",
+        },
+        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+    }
+
+    output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
+    var noPermissionServiceAccount corev1.ServiceAccount
+    helm.UnmarshalK8SYaml(t, output, &noPermissionServiceAccount)
+    assert.Equal(t, wantCustomValue, noPermissionServiceAccount.Annotations[targetAnnotations])
+    assert.Equal(t, "npsa-custom-value", noPermissionServiceAccount.Annotations["npsa-custom"])
+}
+
 func TestNamespaceOverride(t *testing.T) {
     t.Parallel()
 
@@ -1,8 +0,0 @@
-githubConfigUrl: https://github.com/actions/actions-runner-controller
-githubConfigSecret:
-  github_token: test
-containerMode:
-  type: kubernetes
-  kubernetesModeServiceAccount:
-    annotations:
-      eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/sample-role
@@ -4,15 +4,15 @@ githubConfigUrl: ""
 
 ## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
 ## You can choose to supply:
-##   A) a PAT token, 
-##   B) a GitHub App, or 
+##   A) a PAT token,
+##   B) a GitHub App, or
 ##   C) a pre-defined Kubernetes secret.
 ## The syntax for each of these variations is documented below.
 ## (Variation A) When using a PAT token, the syntax is as follows:
 githubConfigSecret:
-  # Example: 
+  # Example:
   # github_token: "ghp_sampleSampleSampleSampleSampleSample"
-  github_token: "" 
+  github_token: ""
 #
 ## (Variation B) When using a GitHub App, the syntax is as follows:
 # githubConfigSecret:
@@ -100,8 +100,7 @@ githubConfigSecret:
 #     resources:
 #       requests:
 #         storage: 1Gi
-#   kubernetesModeServiceAccount:
-#     annotations:
+#
 
 ## listenerTemplate is the PodSpec for each listener Pod
 ## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
@@ -219,3 +218,63 @@ template:
 
 # Overrides the default `.Release.Namespace` for all resources in this chart.
 namespaceOverride: ""
+
+## Optional annotations and labels applied to all resources created by the helm installation.
+##
+## Annotations applied to all resources created by this helm chart. Annotations will not override the default ones, so make sure
+## the custom annotation is not a reserved one.
+# annotations:
+#   key: value
+##
+## Labels applied to all resources created by this helm chart. Labels will not override the default ones, so make sure
+## the custom label is not a reserved one.
+# labels:
+#   key: value
+
+## If you want more fine-grained control over annotations applied to a particular resource created by this chart,
+## you can use `resourceMeta`.
+## The order of applying labels and annotations is:
+##   1. Apply labels/annotations globally, using the `annotations` and `labels` fields
+##   2. Apply `resourceMeta` labels/annotations
+##   3. Apply reserved labels/annotations
+# resourceMeta:
+#   autoscalingRunnerSet:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   githubConfigSecret:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   kubernetesModeRole:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   kubernetesModeRoleBinding:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   kubernetesModeServiceAccount:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   managerRole:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   managerRoleBinding:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
+#   noPermissionServiceAccount:
+#     labels:
+#       key: value
+#     annotations:
+#       key: value
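The ordering documented above (global labels/annotations first, then resourceMeta, then the reserved chart metadata) means a later layer wins on key conflicts, which is why the reserved app.kubernetes.io/part-of value cannot be overridden. A short illustrative Go sketch of that overlay semantics; this is not chart code, just an analogy for how the templates emit the blocks:

package main

import "fmt"

// merge applies overlays left to right; later layers win on key conflicts,
// mirroring the order in which the chart's templates emit label blocks.
func merge(layers ...map[string]string) map[string]string {
    out := map[string]string{}
    for _, layer := range layers {
        for k, v := range layer {
            out[k] = v
        }
    }
    return out
}

func main() {
    global := map[string]string{"team": "infra", "app.kubernetes.io/part-of": "no-override"}
    resourceMeta := map[string]string{"ars-custom": "ars-custom-value"}
    reserved := map[string]string{"app.kubernetes.io/part-of": "gha-rs"}

    labels := merge(global, resourceMeta, reserved)
    fmt.Println(labels["app.kubernetes.io/part-of"])  // gha-rs: reserved labels win
    fmt.Println(labels["team"], labels["ars-custom"]) // custom keys survive
}

This matches what TestCustomLabels above asserts: the user-supplied app.kubernetes.io/part-of value is replaced by the reserved "gha-rs", while non-reserved custom keys come through untouched.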
@@ -16,22 +16,22 @@ import (
 )
 
 type Config struct {
-    ConfigureUrl                string `json:"configureUrl"`
-    AppID                       int64  `json:"appID"`
-    AppInstallationID           int64  `json:"appInstallationID"`
-    AppPrivateKey               string `json:"appPrivateKey"`
+    ConfigureUrl                string `json:"configure_url"`
+    AppID                       int64  `json:"app_id"`
+    AppInstallationID           int64  `json:"app_installation_id"`
+    AppPrivateKey               string `json:"app_private_key"`
     Token                       string `json:"token"`
-    EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
-    EphemeralRunnerSetName      string `json:"ephemeralRunnerSetName"`
-    MaxRunners                  int    `json:"maxRunners"`
-    MinRunners                  int    `json:"minRunners"`
-    RunnerScaleSetId            int    `json:"runnerScaleSetId"`
-    RunnerScaleSetName          string `json:"runnerScaleSetName"`
-    ServerRootCA                string `json:"serverRootCA"`
-    LogLevel                    string `json:"logLevel"`
-    LogFormat                   string `json:"logFormat"`
-    MetricsAddr                 string `json:"metricsAddr"`
-    MetricsEndpoint             string `json:"metricsEndpoint"`
+    EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
+    EphemeralRunnerSetName      string `json:"ephemeral_runner_set_name"`
+    MaxRunners                  int    `json:"max_runners"`
+    MinRunners                  int    `json:"min_runners"`
+    RunnerScaleSetId            int    `json:"runner_scale_set_id"`
+    RunnerScaleSetName          string `json:"runner_scale_set_name"`
+    ServerRootCA                string `json:"server_root_ca"`
+    LogLevel                    string `json:"log_level"`
+    LogFormat                   string `json:"log_format"`
+    MetricsAddr                 string `json:"metrics_addr"`
+    MetricsEndpoint             string `json:"metrics_endpoint"`
 }
 
 func Read(path string) (Config, error) {
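The tag rename above changes the wire format of the listener's config file: encoding/json matches keys against struct tags (case-insensitively, but not across punctuation), so the old camelCase keys no longer decode. A minimal sketch using a hypothetical two-field excerpt of the struct, not the real one:

package main

import (
    "encoding/json"
    "fmt"
)

// Hypothetical two-field excerpt of the listener config, tagged the new way.
type Config struct {
    ConfigureUrl string `json:"configure_url"`
    MaxRunners   int    `json:"max_runners"`
}

func main() {
    var fresh Config
    // Keys in the new snake_case form decode into the struct...
    _ = json.Unmarshal([]byte(`{"configure_url":"https://github.com/actions","max_runners":5}`), &fresh)
    fmt.Printf("%+v\n", fresh) // {ConfigureUrl:https://github.com/actions MaxRunners:5}

    var stale Config
    // ...while old camelCase keys match no tag and are silently dropped, so a
    // config file written in the old format reads back as zero values.
    _ = json.Unmarshal([]byte(`{"configureUrl":"x","maxRunners":9}`), &stale)
    fmt.Printf("%+v\n", stale) // {ConfigureUrl: MaxRunners:0}
}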
@@ -1,129 +0,0 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    jsonpatch "github.com/evanphx/json-patch"
    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

type AutoScalerKubernetesManager struct {
    *kubernetes.Clientset

    logger logr.Logger
}

func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, error) {
    conf, err := rest.InClusterConfig()
    if err != nil {
        return nil, err
    }

    kubeClient, err := kubernetes.NewForConfig(conf)
    if err != nil {
        return nil, err
    }

    var manager = &AutoScalerKubernetesManager{
        Clientset: kubeClient,
        logger:    logger.WithName("KubernetesManager"),
    }
    return manager, nil
}

func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
    original := &v1alpha1.EphemeralRunnerSet{
        Spec: v1alpha1.EphemeralRunnerSetSpec{
            Replicas: -1,
        },
    }
    originalJson, err := json.Marshal(original)
    if err != nil {
        k.logger.Error(err, "could not marshal empty ephemeral runner set")
    }

    patch := &v1alpha1.EphemeralRunnerSet{
        Spec: v1alpha1.EphemeralRunnerSetSpec{
            Replicas: runnerCount,
        },
    }
    patchJson, err := json.Marshal(patch)
    if err != nil {
        k.logger.Error(err, "could not marshal patch ephemeral runner set")
    }
    mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchJson)
    if err != nil {
        k.logger.Error(err, "could not create merge patch json for ephemeral runner set")
    }

    k.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))

    patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
    err = k.RESTClient().
        Patch(types.MergePatchType).
        Prefix("apis", "actions.github.com", "v1alpha1").
        Namespace(namespace).
        Resource("EphemeralRunnerSets").
        Name(resourceName).
        Body([]byte(mergePatch)).
        Do(ctx).
        Into(patchedEphemeralRunnerSet)
    if err != nil {
        return fmt.Errorf("could not patch ephemeral runner set , patch JSON: %s, error: %w", string(mergePatch), err)
    }

    k.logger.Info("Ephemeral runner set scaled.", "namespace", namespace, "name", resourceName, "replicas", patchedEphemeralRunnerSet.Spec.Replicas)
    return nil
}

func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
    original := &v1alpha1.EphemeralRunner{}
    originalJson, err := json.Marshal(original)
    if err != nil {
        return fmt.Errorf("could not marshal empty ephemeral runner, error: %w", err)
    }

    patch := &v1alpha1.EphemeralRunner{
        Status: v1alpha1.EphemeralRunnerStatus{
            JobRequestId:      jobRequestId,
            JobRepositoryName: fmt.Sprintf("%s/%s", ownerName, repositoryName),
            WorkflowRunId:     workflowRunId,
            JobWorkflowRef:    jobWorkflowRef,
            JobDisplayName:    jobDisplayName,
        },
    }
    patchedJson, err := json.Marshal(patch)
    if err != nil {
        return fmt.Errorf("could not marshal patched ephemeral runner, error: %w", err)
    }

    mergePatch, err := jsonpatch.CreateMergePatch(originalJson, patchedJson)
    if err != nil {
        k.logger.Error(err, "could not create merge patch json for ephemeral runner")
    }

    k.logger.Info("Created merge patch json for EphemeralRunner status update", "json", string(mergePatch))

    patchedStatus := &v1alpha1.EphemeralRunner{}
    err = k.RESTClient().
        Patch(types.MergePatchType).
        Prefix("apis", "actions.github.com", "v1alpha1").
        Namespace(namespace).
        Resource("EphemeralRunners").
        Name(resourceName).
        SubResource("status").
        Body(mergePatch).
        Do(ctx).
        Into(patchedStatus)
    if err != nil {
        return fmt.Errorf("could not patch ephemeral runner status, patch JSON: %s, error: %w", string(mergePatch), err)
    }

    return nil
}
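For reference, the patch construction in ScaleEphemeralRunnerSet above can be reproduced in isolation. A sketch using the same jsonpatch library but stand-in types (not the controller's real API structs): marshalling a sentinel Replicas of -1 as the base guarantees the desired count differs, so the generated merge patch always mentions spec.replicas.

package main

import (
    "encoding/json"
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
)

// Stand-ins for v1alpha1.EphemeralRunnerSet, for illustration only.
type spec struct {
    Replicas int `json:"replicas,omitempty"`
}

type runnerSet struct {
    Spec spec `json:"spec"`
}

func main() {
    // The base uses -1, a value no real scale target uses, so the diff
    // against the desired replica count is never empty.
    base, _ := json.Marshal(runnerSet{Spec: spec{Replicas: -1}})
    desired, _ := json.Marshal(runnerSet{Spec: spec{Replicas: 5}})

    patch, _ := jsonpatch.CreateMergePatch(base, desired)
    fmt.Println(string(patch)) // {"spec":{"replicas":5}}
}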
@@ -1,191 +0,0 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "math/rand"
    "net/http"
    "os"
    "time"

    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/go-logr/logr"
    "github.com/google/uuid"
    "github.com/pkg/errors"
)

const (
    sessionCreationMaxRetryCount = 10
)

type devContextKey bool

var testIgnoreSleep devContextKey = true

type AutoScalerClient struct {
    client actions.SessionService
    logger logr.Logger

    lastMessageId  int64
    initialMessage *actions.RunnerScaleSetMessage
}

func NewAutoScalerClient(
    ctx context.Context,
    client actions.ActionsService,
    logger *logr.Logger,
    runnerScaleSetId int,
    options ...func(*AutoScalerClient),
) (*AutoScalerClient, error) {
    listener := AutoScalerClient{
        logger: logger.WithName("auto_scaler"),
    }

    session, initialMessage, err := createSession(ctx, &listener.logger, client, runnerScaleSetId)
    if err != nil {
        return nil, fmt.Errorf("fail to create session. %w", err)
    }

    listener.lastMessageId = 0
    listener.initialMessage = initialMessage
    listener.client = newSessionClient(client, logger, session)

    for _, option := range options {
        option(&listener)
    }

    return &listener, nil
}

func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
    hostName, err := os.Hostname()
    if err != nil {
        hostName = uuid.New().String()
        logger.Info("could not get hostname, fail back to a random string.", "fallback", hostName)
    }

    var runnerScaleSetSession *actions.RunnerScaleSetSession
    var retryCount int
    for {
        runnerScaleSetSession, err = client.CreateMessageSession(ctx, runnerScaleSetId, hostName)
        if err == nil {
            break
        }

        clientSideError := &actions.HttpClientSideError{}
        if errors.As(err, &clientSideError) && clientSideError.Code != http.StatusConflict {
            logger.Info("unable to create message session. The error indicates something is wrong on the client side, won't make any retry.")
            return nil, nil, fmt.Errorf("create message session http request failed. %w", err)
        }

        retryCount++
        if retryCount >= sessionCreationMaxRetryCount {
            return nil, nil, fmt.Errorf("create message session failed since it exceed %d retry limit. %w", sessionCreationMaxRetryCount, err)
        }

        logger.Info("unable to create message session. Will try again in 30 seconds", "error", err.Error())
        if ok := ctx.Value(testIgnoreSleep); ok == nil {
            time.Sleep(getRandomDuration(30, 45))
        }
    }

    statistics, _ := json.Marshal(runnerScaleSetSession.Statistics)
    logger.Info("current runner scale set statistics.", "statistics", string(statistics))

    if runnerScaleSetSession.Statistics.TotalAvailableJobs > 0 || runnerScaleSetSession.Statistics.TotalAssignedJobs > 0 {
        acquirableJobs, err := client.GetAcquirableJobs(ctx, runnerScaleSetId)
        if err != nil {
            return nil, nil, fmt.Errorf("get acquirable jobs failed. %w", err)
        }

        acquirableJobsJson, err := json.Marshal(acquirableJobs.Jobs)
        if err != nil {
            return nil, nil, fmt.Errorf("marshal acquirable jobs failed. %w", err)
        }

        initialMessage := &actions.RunnerScaleSetMessage{
            MessageId:   0,
            MessageType: "RunnerScaleSetJobMessages",
            Statistics:  runnerScaleSetSession.Statistics,
            Body:        string(acquirableJobsJson),
        }

        return runnerScaleSetSession, initialMessage, nil
    }

    initialMessage := &actions.RunnerScaleSetMessage{
        MessageId:   0,
        MessageType: "RunnerScaleSetJobMessages",
        Statistics:  runnerScaleSetSession.Statistics,
        Body:        "",
    }

    return runnerScaleSetSession, initialMessage, nil
}

func (m *AutoScalerClient) Close() error {
    m.logger.Info("closing.")
    return m.client.Close()
}

func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
    if m.initialMessage != nil {
        err := handler(m.initialMessage)
        if err != nil {
            return fmt.Errorf("fail to process initial message. %w", err)
        }

        m.initialMessage = nil
        return nil
    }

    for {
        message, err := m.client.GetMessage(ctx, m.lastMessageId, maxCapacity)
        if err != nil {
            return fmt.Errorf("get message failed from refreshing client. %w", err)
        }

        if message == nil {
            continue
        }

        err = handler(message)
        if err != nil {
            return fmt.Errorf("handle message failed. %w", err)
        }

        m.lastMessageId = message.MessageId

        return m.deleteMessage(ctx, message.MessageId)
    }
}

func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
    err := m.client.DeleteMessage(ctx, messageId)
    if err != nil {
        return fmt.Errorf("delete message failed from refreshing client. %w", err)
    }

    m.logger.Info("deleted message.", "messageId", messageId)
    return nil
}

func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
    m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
    if len(requestIds) == 0 {
        return nil
    }

    ids, err := m.client.AcquireJobs(ctx, requestIds)
    if err != nil {
        return fmt.Errorf("acquire jobs failed from refreshing client. %w", err)
    }

    m.logger.Info("acquired jobs.", "requested", len(requestIds), "acquired", len(ids))
    return nil
}

func getRandomDuration(minSeconds, maxSeconds int) time.Duration {
    return time.Duration(rand.Intn(maxSeconds-minSeconds)+minSeconds) * time.Second
}
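The createSession loop in the deleted listener above retries only on HTTP 409 (another session already holds the scale set), fails fast on other client-side errors, sleeps a jittered 30-44 seconds between tries, and gives up after sessionCreationMaxRetryCount attempts. A compact, dependency-free sketch of that control flow; the names here are hypothetical stand-ins, not the original API:

package main

import (
    "errors"
    "fmt"
    "math/rand"
    "time"
)

const maxRetries = 10

var errConflict = errors.New("409 conflict: session already exists")

// createSession fails fast on non-conflict errors and retries conflicts
// with a 30-44s jittered sleep, capped at maxRetries attempts.
func createSession(attempt func() error, sleep func(time.Duration)) error {
    for retries := 0; ; retries++ {
        err := attempt()
        if err == nil {
            return nil
        }
        if !errors.Is(err, errConflict) {
            return fmt.Errorf("create message session http request failed: %w", err)
        }
        if retries+1 >= maxRetries {
            return fmt.Errorf("create message session failed after %d attempts: %w", maxRetries, err)
        }
        sleep(time.Duration(rand.Intn(15)+30) * time.Second) // 30-44s jitter
    }
}

func main() {
    calls := 0
    err := createSession(
        func() error {
            calls++
            if calls < 3 {
                return errConflict
            }
            return nil
        },
        func(time.Duration) {}, // no-op sleep keeps the demo instant
    )
    fmt.Println(calls, err) // 3 <nil>
}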
@ -1,735 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/logging"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCreateSession(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
require.NoError(t, err, "Error creating autoscaler client")
|
||||
assert.Equal(t, session, session, "Session is not correct")
|
||||
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
|
||||
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_CreateInitMessage(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{
|
||||
TotalAvailableJobs: 1,
|
||||
TotalAssignedJobs: 5,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
|
||||
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
|
||||
Count: 1,
|
||||
Jobs: []actions.AcquirableJob{
|
||||
{
|
||||
RunnerRequestId: 1,
|
||||
OwnerName: "owner",
|
||||
RepositoryName: "repo",
|
||||
AcquireJobUrl: "https://github.com",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
require.NoError(t, err, "Error creating autoscaler client")
|
||||
assert.Equal(t, session, session, "Session is not correct")
|
||||
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
|
||||
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
|
||||
assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0")
|
||||
assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages")
|
||||
assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5")
|
||||
assert.Equal(t, 1, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 1")
|
||||
assert.Equal(t, "[{\"acquireJobUrl\":\"https://github.com\",\"messageType\":\"\",\"runnerRequestId\":1,\"repositoryName\":\"repo\",\"ownerName\":\"owner\",\"jobWorkflowRef\":\"\",\"eventName\":\"\",\"requestLabels\":null}]", asClient.initialMessage.Body, "Initial message body is not correct")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_CreateInitMessageWithOnlyAssignedJobs(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{
|
||||
TotalAssignedJobs: 5,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
|
||||
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
|
||||
Count: 0,
|
||||
Jobs: []actions.AcquirableJob{},
|
||||
}, nil)
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
require.NoError(t, err, "Error creating autoscaler client")
|
||||
assert.Equal(t, session, session, "Session is not correct")
|
||||
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
|
||||
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
|
||||
assert.Equal(t, int64(0), asClient.initialMessage.MessageId, "Initial message id should be 0")
|
||||
assert.Equal(t, "RunnerScaleSetJobMessages", asClient.initialMessage.MessageType, "Initial message type should be RunnerScaleSetJobMessages")
|
||||
assert.Equal(t, 5, asClient.initialMessage.Statistics.TotalAssignedJobs, "Initial message total assigned jobs should be 5")
|
||||
assert.Equal(t, 0, asClient.initialMessage.Statistics.TotalAvailableJobs, "Initial message total available jobs should be 0")
|
||||
assert.Equal(t, "[]", asClient.initialMessage.Body, "Initial message body is not correct")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_CreateInitMessageFailed(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{
|
||||
TotalAvailableJobs: 1,
|
||||
TotalAssignedJobs: 5,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
|
||||
mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(nil, fmt.Errorf("error"))
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
assert.ErrorContains(t, err, "get acquirable jobs failed. error", "Unexpected error")
|
||||
assert.Nil(t, asClient, "Client should be nil")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_RetrySessionConflict(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
|
||||
Code: 409,
|
||||
}).Once()
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil).Once()
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
require.NoError(t, err, "Error creating autoscaler client")
|
||||
assert.Equal(t, session, session, "Session is not correct")
|
||||
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
|
||||
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_RetrySessionConflict_RunOutOfRetry(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
|
||||
Code: 409,
|
||||
})
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
assert.Error(t, err, "Error should be returned")
|
||||
assert.Nil(t, asClient, "AutoScaler should be nil")
|
||||
assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", sessionCreationMaxRetryCount), "CreateMessageSession should be called 10 times")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestCreateSession_NotRetryOnGeneralException(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.WithValue(context.Background(), testIgnoreSleep, true)
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(nil, &actions.HttpClientSideError{
|
||||
Code: 403,
|
||||
})
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
|
||||
|
||||
assert.Error(t, err, "Error should be returned")
|
||||
assert.Nil(t, asClient, "AutoScaler should be nil")
|
||||
assert.True(t, mockActionsClient.AssertNumberOfCalls(t, "CreateMessageSession", 1), "CreateMessageSession should be called 1 time and not retry on generic error")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestDeleteSession(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
mockSessionClient := &actions.MockSessionService{}
|
||||
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
Statistics: &actions.RunnerScaleSetStatistic{},
|
||||
}
|
||||
mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
|
||||
mockSessionClient.On("Close").Return(nil)
|
||||
|
||||
asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
|
||||
asc.client = mockSessionClient
|
||||
})
|
||||
require.NoError(t, err, "Error creating autoscaler client")
|
||||
|
||||
err = asClient.Close()
|
||||
assert.NoError(t, err, "Error deleting session")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
|
||||
}

func TestDeleteSession_Failed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("Close").Return(fmt.Errorf("error"))

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.Close()
	assert.Error(t, err, "Error should be returned")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "test",
		Body:        "test",
	}, nil)
	mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.NoError(t, err, "Error getting message")
	assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message should not update the last message id")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.NoError(t, err, "Error getting message")
	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "test",
		Body:        "test",
	}, nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	// read initial message
	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.NoError(t, err, "Error getting message")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return fmt.Errorf("error")
	}, 10)

	assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs: 1,
			TotalAssignedJobs:  2,
		},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything, mock.Anything).Return(session, nil)
	mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
		Count: 1,
		Jobs: []actions.AcquirableJob{
			{
				RunnerRequestId: 1,
				OwnerName:       "owner",
				RepositoryName:  "repo",
				AcquireJobUrl:   "https://github.com",
			},
		},
	}, nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
	require.NoError(t, err, "Error creating autoscaler client")
	require.NotNil(t, asClient.initialMessage, "Initial message should be set")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.NoError(t, err, "Error getting message")
	assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
	assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message should not update the last message id")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs: 1,
			TotalAssignedJobs:  2,
		},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{
		Count: 1,
		Jobs: []actions.AcquirableJob{
			{
				RunnerRequestId: 1,
				OwnerName:       "owner",
				RepositoryName:  "repo",
				AcquireJobUrl:   "https://github.com",
			},
		},
	}, nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1)
	require.NoError(t, err, "Error creating autoscaler client")
	require.NotNil(t, asClient.initialMessage, "Initial message should be set")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return fmt.Errorf("error")
	}, 10)

	assert.ErrorContains(t, err, "fail to process initial message. error", "Error getting message")
	assert.NotNil(t, asClient.initialMessage, "Initial message should still be set")
	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, nil).Times(3)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "test",
		Body:        "test",
	}, nil).Once()
	mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)
	assert.NoError(t, err, "Error getting initial message")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.NoError(t, err, "Error getting message")
	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, fmt.Errorf("error"))

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	// process initial message
	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		return nil
	}, 10)
	assert.NoError(t, err, "Error getting initial message")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		return fmt.Errorf("Should not be called")
	}, 10)

	assert.ErrorContains(t, err, "get message failed from refreshing client. error", "Error should be returned")
	assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "test",
		Body:        "test",
	}, nil)
	mockSessionClient.On("DeleteMessage", ctx, int64(1)).Return(fmt.Errorf("error"))

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)
	assert.NoError(t, err, "Error getting initial message")

	err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
		logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
		return nil
	}, 10)

	assert.ErrorContains(t, err, "delete message failed from refreshing client. error", "Error getting message")
	assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestAcquireJobsForRunnerScaleSet(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("AcquireJobs", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1, 2, 3}, nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3})
	assert.NoError(t, err, "Error acquiring jobs")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestAcquireJobsForRunnerScaleSet_SkipEmptyList(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{})
	assert.NoError(t, err, "Error acquiring jobs")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

func TestAcquireJobsForRunnerScaleSet_Failed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	mockSessionClient := &actions.MockSessionService{}
	logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
		Statistics: &actions.RunnerScaleSetStatistic{},
	}
	mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil)
	mockSessionClient.On("AcquireJobs", ctx, mock.Anything).Return(nil, fmt.Errorf("error"))

	asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) {
		asc.client = mockSessionClient
	})
	require.NoError(t, err, "Error creating autoscaler client")

	err = asClient.AcquireJobsForRunnerScaleSet(ctx, []int64{1, 2, 3})
	assert.ErrorContains(t, err, "acquire jobs failed from refreshing client. error", "Expect error acquiring jobs")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}

@@ -1,246 +0,0 @@
package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
)

type ScaleSettings struct {
	Namespace    string
	ResourceName string
	MinRunners   int
	MaxRunners   int
}

type Service struct {
	ctx                context.Context
	logger             logr.Logger
	rsClient           RunnerScaleSetClient
	kubeManager        KubernetesManager
	settings           *ScaleSettings
	currentRunnerCount int
	metricsExporter    metricsExporter
	errs               []error
}

func WithPrometheusMetrics(conf config.Config) func(*Service) {
	return func(svc *Service) {
		parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
		if err != nil {
			svc.errs = append(svc.errs, err)
			// NewService surfaces the collected errors; return early so the
			// nil parsedURL is not dereferenced below.
			return
		}

		svc.metricsExporter.withBaseLabels(baseLabels{
			scaleSetName:      conf.EphemeralRunnerSetName,
			scaleSetNamespace: conf.EphemeralRunnerSetNamespace,
			enterprise:        parsedURL.Enterprise,
			organization:      parsedURL.Organization,
			repository:        parsedURL.Repository,
		})
	}
}

func WithLogger(logger logr.Logger) func(*Service) {
	return func(s *Service) {
		s.logger = logger.WithName("service")
	}
}

func NewService(
	ctx context.Context,
	rsClient RunnerScaleSetClient,
	manager KubernetesManager,
	settings *ScaleSettings,
	options ...func(*Service),
) (*Service, error) {
	s := &Service{
		ctx:                ctx,
		rsClient:           rsClient,
		kubeManager:        manager,
		settings:           settings,
		currentRunnerCount: -1, // force patch on startup
		logger:             logr.FromContextOrDiscard(ctx),
	}

	for _, option := range options {
		option(s)
	}

	if len(s.errs) > 0 {
		return nil, errors.Join(s.errs...)
	}

	return s, nil
}
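
// A minimal sketch of how a caller wires the service up, mirrored from the
// tests in this package (the client, manager, and settings values here are
// illustrative assumptions, not fixed API requirements):
//
//	svc, err := NewService(ctx, rsClient, kubeManager,
//		&ScaleSettings{Namespace: "ns", ResourceName: "rs", MinRunners: 0, MaxRunners: 5},
//		WithLogger(logger))
//	if err != nil {
//		return err
//	}
//	return svc.Start() // blocks until ctx is cancelled or a message fails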

// Start runs the listener loop: it polls for runner scale set messages and
// processes each one until the service context is cancelled or processing fails.
func (s *Service) Start() error {
	s.metricsExporter.publishStatic(s.settings.MaxRunners, s.settings.MinRunners)
	for {
		s.logger.Info("waiting for message...")
		select {
		case <-s.ctx.Done():
			s.logger.Info("service is stopped.")
			return nil
		default:
			err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage, s.settings.MaxRunners)
			if err != nil {
				return fmt.Errorf("could not get and process message. %w", err)
			}
		}
	}
}

func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
	s.logger.Info("process message.", "messageId", message.MessageId, "messageType", message.MessageType)
	if message.Statistics == nil {
		return fmt.Errorf("can't process message with empty statistics")
	}

	s.logger.Info("current runner scale set statistics.",
		"available jobs", message.Statistics.TotalAvailableJobs,
		"acquired jobs", message.Statistics.TotalAcquiredJobs,
		"assigned jobs", message.Statistics.TotalAssignedJobs,
		"running jobs", message.Statistics.TotalRunningJobs,
		"registered runners", message.Statistics.TotalRegisteredRunners,
		"busy runners", message.Statistics.TotalBusyRunners,
		"idle runners", message.Statistics.TotalIdleRunners)

	s.metricsExporter.publishStatistics(message.Statistics)

	if message.MessageType != "RunnerScaleSetJobMessages" {
		s.logger.Info("skip message with unknown message type.", "messageType", message.MessageType)
		return nil
	}

	if message.MessageId == 0 && message.Body == "" { // initial message with statistics only
		return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
	}

	var batchedMessages []json.RawMessage
	if err := json.NewDecoder(strings.NewReader(message.Body)).Decode(&batchedMessages); err != nil {
		return fmt.Errorf("could not decode job messages. %w", err)
	}

	s.logger.Info("process batched runner scale set job messages.", "messageId", message.MessageId, "batchSize", len(batchedMessages))

	var availableJobs []int64
	for _, message := range batchedMessages {
		var messageType actions.JobMessageType
		if err := json.Unmarshal(message, &messageType); err != nil {
			return fmt.Errorf("could not decode job message type. %w", err)
		}

		switch messageType.MessageType {
		case "JobAvailable":
			var jobAvailable actions.JobAvailable
			if err := json.Unmarshal(message, &jobAvailable); err != nil {
				return fmt.Errorf("could not decode job available message. %w", err)
			}
			s.logger.Info(
				"job available message received.",
				"RequestId",
				jobAvailable.RunnerRequestId,
			)
			availableJobs = append(availableJobs, jobAvailable.RunnerRequestId)
		case "JobAssigned":
			var jobAssigned actions.JobAssigned
			if err := json.Unmarshal(message, &jobAssigned); err != nil {
				return fmt.Errorf("could not decode job assigned message. %w", err)
			}
			s.logger.Info(
				"job assigned message received.",
				"RequestId",
				jobAssigned.RunnerRequestId,
			)
			// s.metricsExporter.publishJobAssigned(&jobAssigned)
		case "JobStarted":
			var jobStarted actions.JobStarted
			if err := json.Unmarshal(message, &jobStarted); err != nil {
				return fmt.Errorf("could not decode job started message. %w", err)
			}
			s.logger.Info(
				"job started message received.",
				"RequestId",
				jobStarted.RunnerRequestId,
				"RunnerId",
				jobStarted.RunnerId,
			)
			s.metricsExporter.publishJobStarted(&jobStarted)
			s.updateJobInfoForRunner(jobStarted)
		case "JobCompleted":
			var jobCompleted actions.JobCompleted
			if err := json.Unmarshal(message, &jobCompleted); err != nil {
				return fmt.Errorf("could not decode job completed message. %w", err)
			}
			s.logger.Info(
				"job completed message received.",
				"RequestId",
				jobCompleted.RunnerRequestId,
				"Result",
				jobCompleted.Result,
				"RunnerId",
				jobCompleted.RunnerId,
				"RunnerName",
				jobCompleted.RunnerName,
			)
			s.metricsExporter.publishJobCompleted(&jobCompleted)
		default:
			s.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
		}
	}

	err := s.rsClient.AcquireJobsForRunnerScaleSet(s.ctx, availableJobs)
	if err != nil {
		return fmt.Errorf("could not acquire jobs. %w", err)
	}

	return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
}
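
// For reference, the decoded "RunnerScaleSetJobMessages" body handled above is
// a JSON array of job messages, each tagged with the messageType the switch
// dispatches on. This sample is reconstructed from the tests in this package;
// every field value is illustrative:
//
//	[
//		{"messageType": "JobAvailable", "runnerRequestId": 3},
//		{"messageType": "JobAssigned", "runnerRequestId": 2},
//		{"messageType": "JobStarted", "runnerRequestId": 3, "runnerId": 1, "runnerName": "runner1"},
//		{"messageType": "JobCompleted", "runnerRequestId": 1, "result": "succeed"}
//	]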

func (s *Service) scaleForAssignedJobCount(count int) error {
	// Max runners should always be set by the resource builder either to the configured value,
	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
	targetRunnerCount := min(s.settings.MinRunners+count, s.settings.MaxRunners)
	s.metricsExporter.publishDesiredRunners(targetRunnerCount)
	if targetRunnerCount != s.currentRunnerCount {
		s.logger.Info("try scale runner request up/down based on assigned job count",
			"assigned job", count,
			"decision", targetRunnerCount,
			"min", s.settings.MinRunners,
			"max", s.settings.MaxRunners,
			"currentRunnerCount", s.currentRunnerCount,
		)
		err := s.kubeManager.ScaleEphemeralRunnerSet(s.ctx, s.settings.Namespace, s.settings.ResourceName, targetRunnerCount)
		if err != nil {
			return fmt.Errorf("could not scale ephemeral runner set (%s/%s). %w", s.settings.Namespace, s.settings.ResourceName, err)
		}

		s.currentRunnerCount = targetRunnerCount
	}

	return nil
}
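
// Worked example of the clamp above, using the values from
// TestScaleForAssignedJobCount_ScaleWithinMinMax: with MinRunners=1 and
// MaxRunners=5, assigned job counts of 0, 3, 5, 1, and 10 produce target
// runner counts of 1, 4, 5, 2, and 5 respectively.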

// updateJobInfoForRunner updates the ephemeral runner with the job info.
// This is best effort: the info is only used to improve telemetry.
func (s *Service) updateJobInfoForRunner(jobInfo actions.JobStarted) {
	s.logger.Info("update job info for runner",
		"runnerName", jobInfo.RunnerName,
		"ownerName", jobInfo.OwnerName,
		"repoName", jobInfo.RepositoryName,
		"workflowRef", jobInfo.JobWorkflowRef,
		"workflowRunId", jobInfo.WorkflowRunId,
		"jobDisplayName", jobInfo.JobDisplayName,
		"requestId", jobInfo.RunnerRequestId,
	)
	err := s.kubeManager.UpdateEphemeralRunnerWithJobInfo(s.ctx, s.settings.Namespace, jobInfo.RunnerName, jobInfo.OwnerName, jobInfo.RepositoryName, jobInfo.JobWorkflowRef, jobInfo.JobDisplayName, jobInfo.WorkflowRunId, jobInfo.RunnerRequestId)
	if err != nil {
		s.logger.Error(err, "could not update ephemeral runner with job info", "runnerName", jobInfo.RunnerName, "requestId", jobInfo.RunnerRequestId)
	}
}

@@ -1,684 +0,0 @@
package main

import (
	"context"
	"fmt"
	"testing"

	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/actions/actions-runner-controller/logging"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestNewService(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)

	require.NoError(t, err)
	assert.Equal(t, logger, service.logger)
}

func TestStart(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(mock.Arguments) { cancel() }).Return(nil).Once()

	err = service.Start()

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestStart_ScaleToMinRunners(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   5,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
		_ = service.scaleForAssignedJobCount(5)
	}).Return(nil)

	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

	err = service.Start()
	assert.NoError(t, err, "Unexpected error")

	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   5,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
	mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
		_ = service.scaleForAssignedJobCount(5)
	}).Return(c.ReturnArguments.Get(0))

	err = service.Start()

	assert.ErrorContains(t, err, "could not get and process message", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestStart_GetMultipleMessages(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(5)
	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

	err = service.Start()

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestStart_ErrorOnMessage(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(2)
	mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once()

	err = service.Start()

	assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_NoStatistic(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "test",
		Body:        "test",
	})

	assert.ErrorContains(t, err, "can't process message with empty statistics", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "unknown",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs: 1,
		},
		Body: "[]",
	})

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs: 1,
		},
		Body: "invalid json",
	})

	assert.ErrorContains(t, err, "could not decode job messages", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAvailableJobs: 1,
		},
		Body: "[\"something\", \"test\"]",
	})

	assert.ErrorContains(t, err, "could not decode job message type", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_MultipleMessages(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   1,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAssignedJobs:  2,
			TotalAvailableJobs: 2,
		},
		Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 3},{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 4},{\"messageType\":\"JobAssigned\", \"runnerRequestId\": 2}, {\"messageType\":\"JobCompleted\", \"runnerRequestId\": 1, \"result\":\"succeed\"},{\"messageType\":\"unknown\"}]",
	})

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_AcquireJobsFailed(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once()

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAssignedJobs:  1,
			TotalAvailableJobs: 1,
		},
		Body: "[{\"messageType\":\"JobAvailable\", \"runnerRequestId\": 1}]",
	})

	assert.ErrorContains(t, err, "could not acquire jobs. error", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   0,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()

	err = service.scaleForAssignedJobCount(2)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(2)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(2)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(2)

	assert.NoError(t, err, "Unexpected error")
	assert.Equal(t, 2, service.currentRunnerCount, "Unexpected runner count")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   1,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 4).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()

	err = service.scaleForAssignedJobCount(0)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(3)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(5)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(1)
	require.NoError(t, err, "Unexpected error")
	err = service.scaleForAssignedJobCount(10)

	assert.NoError(t, err, "Unexpected error")
	assert.Equal(t, 5, service.currentRunnerCount, "Unexpected runner count")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   1,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(fmt.Errorf("error"))

	err = service.scaleForAssignedJobCount(2)

	assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_JobStartedMessage(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   1,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	service.currentRunnerCount = 1

	mockKubeManager.On(
		"UpdateEphemeralRunnerWithJobInfo",
		ctx,
		service.settings.Namespace,
		"runner1",
		"owner1",
		"repo1",
		".github/workflows/ci.yaml",
		"job1",
		int64(100),
		int64(3),
	).Run(
		func(_ mock.Arguments) { cancel() },
	).Return(nil).Once()

	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()
	mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil)

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAssignedJobs:  1,
			TotalAvailableJobs: 0,
		},
		Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]",
	})

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}

func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {
	mockRsClient := &MockRunnerScaleSetClient{}
	mockKubeManager := &MockKubernetesManager{}
	logger, logErr := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, logErr, "Error creating logger")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	service, err := NewService(
		ctx,
		mockRsClient,
		mockKubeManager,
		&ScaleSettings{
			Namespace:    "namespace",
			ResourceName: "resource",
			MinRunners:   1,
			MaxRunners:   5,
		},
		func(s *Service) {
			s.logger = logger
		},
	)
	require.NoError(t, err)

	service.currentRunnerCount = 1

	mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once()
	mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()

	err = service.processMessage(&actions.RunnerScaleSetMessage{
		MessageId:   1,
		MessageType: "RunnerScaleSetJobMessages",
		Statistics: &actions.RunnerScaleSetStatistic{
			TotalAssignedJobs:  0,
			TotalAvailableJobs: 0,
		},
		Body: "[{\"messageType\":\"JobStarted\", \"runnerRequestId\": 3, \"runnerId\": 1, \"runnerName\": \"runner1\", \"ownerName\": \"owner1\", \"repositoryName\": \"repo1\", \"jobWorkflowRef\": \".github/workflows/ci.yaml\", \"jobDisplayName\": \"job1\", \"workflowRunId\": 100 }]",
	})

	assert.NoError(t, err, "Unexpected error")
	assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
	assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
|
||||
|
|
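These tests drive everything through testify mocks. For readers new to the pattern, here is a self-contained sketch of the .On(...).Run(...).Return(...).Once() chain used throughout; the Greeter type is invented for illustration and is not part of this repository.

package main

import (
    "fmt"

    "github.com/stretchr/testify/mock"
)

// MockGreeter mirrors the shape of the generated mocks below:
// embed mock.Mock and dispatch each method through Called.
type MockGreeter struct {
    mock.Mock
}

func (m *MockGreeter) Greet(name string) error {
    return m.Called(name).Error(0)
}

func main() {
    g := &MockGreeter{}
    // Expect exactly one call with "runner1"; run a side effect, then return nil.
    g.On("Greet", "runner1").Run(func(args mock.Arguments) {
        fmt.Println("called with", args.String(0))
    }).Return(nil).Once()

    fmt.Println(g.Greet("runner1")) // <nil>
}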
@ -1,76 +0,0 @@
package config

import (
    "encoding/json"
    "fmt"
    "os"
)

type Config struct {
    ConfigureUrl                string `json:"configureUrl"`
    AppID                       int64  `json:"appID"`
    AppInstallationID           int64  `json:"appInstallationID"`
    AppPrivateKey               string `json:"appPrivateKey"`
    Token                       string `json:"token"`
    EphemeralRunnerSetNamespace string `json:"ephemeralRunnerSetNamespace"`
    EphemeralRunnerSetName      string `json:"ephemeralRunnerSetName"`
    MaxRunners                  int    `json:"maxRunners"`
    MinRunners                  int    `json:"minRunners"`
    RunnerScaleSetId            int    `json:"runnerScaleSetId"`
    RunnerScaleSetName          string `json:"runnerScaleSetName"`
    ServerRootCA                string `json:"serverRootCA"`
    LogLevel                    string `json:"logLevel"`
    LogFormat                   string `json:"logFormat"`
    MetricsAddr                 string `json:"metricsAddr"`
    MetricsEndpoint             string `json:"metricsEndpoint"`
}

func Read(path string) (Config, error) {
    f, err := os.Open(path)
    if err != nil {
        return Config{}, err
    }
    defer f.Close()

    var config Config
    if err := json.NewDecoder(f).Decode(&config); err != nil {
        return Config{}, fmt.Errorf("failed to decode config: %w", err)
    }

    if err := config.validate(); err != nil {
        return Config{}, fmt.Errorf("failed to validate config: %w", err)
    }

    return config, nil
}

func (c *Config) validate() error {
    if len(c.ConfigureUrl) == 0 {
        return fmt.Errorf("GitHubConfigUrl is not provided")
    }

    if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
        return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
    }

    if c.RunnerScaleSetId == 0 {
        return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
    }

    if c.MaxRunners < c.MinRunners {
        return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
    }

    hasToken := len(c.Token) > 0
    hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""

    if !hasToken && !hasPrivateKeyConfig {
        return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    }

    if hasToken && hasPrivateKeyConfig {
        return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    }

    return nil
}

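For reference, a minimal sketch of how this (now removed) config package was consumed: point LISTENER_CONFIG_PATH, the env var the deleted main.go reads below, at a JSON file whose keys match the struct tags above, then call config.Read. Since the package is deleted in this commit, this only compiles against the pre-removal tree; the printed summary is illustrative.

package main

import (
    "fmt"
    "os"

    "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
)

func main() {
    // Read validates as it loads: bad min/max or missing auth fails here.
    cfg, err := config.Read(os.Getenv("LISTENER_CONFIG_PATH"))
    if err != nil {
        fmt.Fprintf(os.Stderr, "read config: %v\n", err)
        os.Exit(1)
    }
    fmt.Printf("scaling %s/%s between %d and %d runners\n",
        cfg.EphemeralRunnerSetNamespace, cfg.EphemeralRunnerSetName,
        cfg.MinRunners, cfg.MaxRunners)
}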
@ -1,92 +0,0 @@
package config

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestConfigValidationMinMax(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
        MinRunners:                  5,
        MaxRunners:                  2,
        Token:                       "token",
    }
    err := config.validate()
    assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}

func TestConfigValidationMissingToken(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationAppKey(t *testing.T) {
    config := &Config{
        AppID:                       1,
        AppInstallationID:           10,
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
    config := &Config{
        AppID:                       1,
        AppInstallationID:           10,
        AppPrivateKey:               "asdf",
        Token:                       "asdf",
        ConfigureUrl:                "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }
    err := config.validate()
    expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidation(t *testing.T) {
    config := &Config{
        ConfigureUrl:                "https://github.com/actions",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
        MinRunners:                  1,
        MaxRunners:                  5,
        Token:                       "asdf",
    }

    err := config.validate()

    assert.NoError(t, err, "Expected no error")
}

func TestConfigValidationConfigUrl(t *testing.T) {
    config := &Config{
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName:      "deployment",
        RunnerScaleSetId:            1,
    }

    err := config.validate()

    assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

@ -1,12 +0,0 @@
package main

import (
    "context"
)

//go:generate mockery --inpackage --name=KubernetesManager
type KubernetesManager interface {
    ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error

    UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, jobRequestId, workflowRunId int64) error
}

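Any type with these two methods satisfies the interface. As a hypothetical illustration (it depends on the KubernetesManager declaration above, so it belongs in the same package), a no-op implementation for wiring or dry-run experiments could look like this:

package main

import (
    "context"
    "fmt"
)

// noopManager satisfies KubernetesManager without touching a cluster.
type noopManager struct{}

func (noopManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
    fmt.Printf("would scale %s/%s to %d runners\n", namespace, resourceName, runnerCount)
    return nil
}

func (noopManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, jobRequestId, workflowRunId int64) error {
    return nil
}

// Compile-time check that noopManager implements the interface.
var _ KubernetesManager = noopManager{}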
@ -1,244 +0,0 @@
/*
Copyright 2021 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "context"
    "crypto/x509"
    "fmt"
    "net/http"
    "net/url"
    "os"
    "os/signal"
    "syscall"
    "time"

    "github.com/actions/actions-runner-controller/build"
    "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/actions/actions-runner-controller/logging"
    "github.com/go-logr/logr"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "golang.org/x/net/http/httpproxy"
    "golang.org/x/sync/errgroup"
)

func main() {
    configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
    if !ok {
        fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
        os.Exit(1)
    }

    rc, err := config.Read(configPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error: reading config from path(%q): %v\n", configPath, err)
        os.Exit(1)
    }

    logLevel := string(logging.LogLevelDebug)
    if rc.LogLevel != "" {
        logLevel = rc.LogLevel
    }

    logFormat := string(logging.LogFormatText)
    if rc.LogFormat != "" {
        logFormat = rc.LogFormat
    }

    logger, err := logging.NewLogger(logLevel, logFormat)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
        os.Exit(1)
    }

    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    g, ctx := errgroup.WithContext(ctx)

    g.Go(func() error {
        opts := runOptions{
            serviceOptions: []func(*Service){
                WithLogger(logger),
            },
        }
        opts.serviceOptions = append(opts.serviceOptions, WithPrometheusMetrics(rc))

        return run(ctx, rc, logger, opts)
    })

    if len(rc.MetricsAddr) != 0 {
        g.Go(func() error {
            metricsServer := metricsServer{
                rc:     rc,
                logger: logger,
            }
            g.Go(func() error {
                <-ctx.Done()
                return metricsServer.shutdown()
            })
            return metricsServer.listenAndServe()
        })
    }

    if err := g.Wait(); err != nil {
        logger.Error(err, "Error encountered")
        os.Exit(1)
    }
}

type metricsServer struct {
    rc     config.Config
    logger logr.Logger
    srv    *http.Server
}

func (s *metricsServer) shutdown() error {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()
    return s.srv.Shutdown(ctx)
}

func (s *metricsServer) listenAndServe() error {
    reg := prometheus.NewRegistry()
    reg.MustRegister(
        // availableJobs,
        // acquiredJobs,
        assignedJobs,
        runningJobs,
        registeredRunners,
        busyRunners,
        minRunners,
        maxRunners,
        desiredRunners,
        idleRunners,
        startedJobsTotal,
        completedJobsTotal,
        // jobQueueDurationSeconds,
        jobStartupDurationSeconds,
        jobExecutionDurationSeconds,
    )

    mux := http.NewServeMux()
    mux.Handle(
        s.rc.MetricsEndpoint,
        promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
    )

    s.srv = &http.Server{
        Addr:    s.rc.MetricsAddr,
        Handler: mux,
    }

    s.logger.Info("Starting metrics server", "address", s.srv.Addr)
    return s.srv.ListenAndServe()
}

type runOptions struct {
    serviceOptions []func(*Service)
}

func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
    // Create root context and hook with sigint and sigterm
    creds := &actions.ActionsAuth{}
    if rc.Token != "" {
        creds.Token = rc.Token
    } else {
        creds.AppCreds = &actions.GitHubAppAuth{
            AppID:             rc.AppID,
            AppInstallationID: rc.AppInstallationID,
            AppPrivateKey:     rc.AppPrivateKey,
        }
    }

    actionsServiceClient, err := newActionsClientFromConfig(
        rc,
        creds,
        actions.WithLogger(logger),
    )
    actionsServiceClient.SetUserAgent(actions.UserAgentInfo{
        Version:    build.Version,
        CommitSHA:  build.CommitSHA,
        ScaleSetID: rc.RunnerScaleSetId,
        HasProxy:   hasProxy(),
        Subsystem:  "githubrunnerscalesetlistener",
    })
    if err != nil {
        return fmt.Errorf("failed to create an Actions Service client: %w", err)
    }

    // Create message listener
    autoScalerClient, err := NewAutoScalerClient(ctx, actionsServiceClient, &logger, rc.RunnerScaleSetId)
    if err != nil {
        return fmt.Errorf("failed to create a message listener: %w", err)
    }
    defer autoScalerClient.Close()

    // Create kube manager and scale controller
    kubeManager, err := NewKubernetesManager(&logger)
    if err != nil {
        return fmt.Errorf("failed to create kubernetes manager: %w", err)
    }

    scaleSettings := &ScaleSettings{
        Namespace:    rc.EphemeralRunnerSetNamespace,
        ResourceName: rc.EphemeralRunnerSetName,
        MaxRunners:   rc.MaxRunners,
        MinRunners:   rc.MinRunners,
    }

    service, err := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, opts.serviceOptions...)
    if err != nil {
        return fmt.Errorf("failed to create new service: %v", err)
    }

    // Start listening for messages
    if err = service.Start(); err != nil {
        return fmt.Errorf("failed to start message queue listener: %w", err)
    }
    return nil
}

func newActionsClientFromConfig(config config.Config, creds *actions.ActionsAuth, options ...actions.ClientOption) (*actions.Client, error) {
    if config.ServerRootCA != "" {
        systemPool, err := x509.SystemCertPool()
        if err != nil {
            return nil, fmt.Errorf("failed to load system cert pool: %w", err)
        }
        pool := systemPool.Clone()
        ok := pool.AppendCertsFromPEM([]byte(config.ServerRootCA))
        if !ok {
            return nil, fmt.Errorf("failed to parse root certificate")
        }

        options = append(options, actions.WithRootCAs(pool))
    }

    proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
    options = append(options, actions.WithProxy(func(req *http.Request) (*url.URL, error) {
        return proxyFunc(req.URL)
    }))

    return actions.NewClient(config.ConfigureUrl, creds, options...)
}

func hasProxy() bool {
    proxyFunc := httpproxy.FromEnvironment().ProxyFunc()
    return proxyFunc != nil
}

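The shutdown choreography in the deleted main above (signal.NotifyContext feeding an errgroup, plus a watcher goroutine that calls shutdown when the context ends) is a reusable pattern on its own. A minimal sketch, assuming a plain http.Server rather than this listener's metrics server; the address is made up:

package main

import (
    "context"
    "errors"
    "net/http"
    "os/signal"
    "syscall"
    "time"

    "golang.org/x/sync/errgroup"
)

func main() {
    // ctx is cancelled on the first SIGINT/SIGTERM.
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    g, ctx := errgroup.WithContext(ctx)
    srv := &http.Server{Addr: ":8080"}

    // Serve until shutdown; ListenAndServe returns http.ErrServerClosed then.
    g.Go(srv.ListenAndServe)
    g.Go(func() error {
        <-ctx.Done() // first signal, or first goroutine error
        shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()
        return srv.Shutdown(shutdownCtx)
    })

    if err := g.Wait(); err != nil && !errors.Is(err, http.ErrServerClosed) {
        panic(err)
    }
}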
@ -1,169 +0,0 @@
package main

import (
    "context"
    "crypto/tls"
    "net/http"
    "net/http/httptest"
    "os"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/actions/actions-runner-controller/github/actions/testserver"
)

func TestCustomerServerRootCA(t *testing.T) {
    ctx := context.Background()
    certsFolder := filepath.Join(
        "../../",
        "github",
        "actions",
        "testdata",
    )
    certPath := filepath.Join(certsFolder, "server.crt")
    keyPath := filepath.Join(certsFolder, "server.key")

    serverCalledSuccessfully := false

    server := testserver.NewUnstarted(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        serverCalledSuccessfully = true
        w.WriteHeader(http.StatusOK)
        w.Write([]byte(`{"count": 0}`))
    }))
    cert, err := tls.LoadX509KeyPair(certPath, keyPath)
    require.NoError(t, err)

    server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
    server.StartTLS()

    var certsString string
    rootCA, err := os.ReadFile(filepath.Join(certsFolder, "rootCA.crt"))
    require.NoError(t, err)
    certsString = string(rootCA)

    intermediate, err := os.ReadFile(filepath.Join(certsFolder, "intermediate.pem"))
    require.NoError(t, err)
    certsString = certsString + string(intermediate)

    config := config.Config{
        ConfigureUrl: server.ConfigURLForOrg("myorg"),
        ServerRootCA: certsString,
    }
    creds := &actions.ActionsAuth{
        Token: "token",
    }

    client, err := newActionsClientFromConfig(config, creds)
    require.NoError(t, err)
    _, err = client.GetRunnerScaleSet(ctx, 1, "test")
    require.NoError(t, err)
    assert.True(t, serverCalledSuccessfully)
}

func TestProxySettings(t *testing.T) {
    t.Run("http", func(t *testing.T) {
        wentThroughProxy := false

        proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            wentThroughProxy = true
        }))
        t.Cleanup(func() {
            proxy.Close()
        })

        prevProxy := os.Getenv("http_proxy")
        os.Setenv("http_proxy", proxy.URL)
        defer os.Setenv("http_proxy", prevProxy)

        config := config.Config{
            ConfigureUrl: "https://github.com/org/repo",
        }
        creds := &actions.ActionsAuth{
            Token: "token",
        }

        client, err := newActionsClientFromConfig(config, creds)
        require.NoError(t, err)

        req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
        require.NoError(t, err)
        _, err = client.Do(req)
        require.NoError(t, err)

        assert.True(t, wentThroughProxy)
    })

    t.Run("https", func(t *testing.T) {
        wentThroughProxy := false

        proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            wentThroughProxy = true
        }))
        t.Cleanup(func() {
            proxy.Close()
        })

        prevProxy := os.Getenv("https_proxy")
        os.Setenv("https_proxy", proxy.URL)
        defer os.Setenv("https_proxy", prevProxy)

        config := config.Config{
            ConfigureUrl: "https://github.com/org/repo",
        }
        creds := &actions.ActionsAuth{
            Token: "token",
        }

        client, err := newActionsClientFromConfig(config, creds, actions.WithRetryMax(0))
        require.NoError(t, err)

        req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
        require.NoError(t, err)

        _, err = client.Do(req)
        // proxy doesn't support https
        assert.Error(t, err)
        assert.True(t, wentThroughProxy)
    })

    t.Run("no_proxy", func(t *testing.T) {
        wentThroughProxy := false

        proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            wentThroughProxy = true
        }))
        t.Cleanup(func() {
            proxy.Close()
        })

        prevProxy := os.Getenv("http_proxy")
        os.Setenv("http_proxy", proxy.URL)
        defer os.Setenv("http_proxy", prevProxy)

        prevNoProxy := os.Getenv("no_proxy")
        os.Setenv("no_proxy", "example.com")
        defer os.Setenv("no_proxy", prevNoProxy)

        config := config.Config{
            ConfigureUrl: "https://github.com/org/repo",
        }
        creds := &actions.ActionsAuth{
            Token: "token",
        }

        client, err := newActionsClientFromConfig(config, creds)
        require.NoError(t, err)

        req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
        require.NoError(t, err)

        _, err = client.Do(req)
        require.NoError(t, err)
        assert.False(t, wentThroughProxy)
    })
}

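These proxy tests work because golang.org/x/net/http/httpproxy reads http_proxy, https_proxy, and no_proxy from the environment at call time. A tiny standalone sketch of that behavior; the proxy address is made up:

package main

import (
    "fmt"
    "net/url"
    "os"

    "golang.org/x/net/http/httpproxy"
)

func main() {
    os.Setenv("http_proxy", "http://127.0.0.1:8888")
    os.Setenv("no_proxy", "example.com")

    // FromEnvironment snapshots the variables above when called.
    proxyFunc := httpproxy.FromEnvironment().ProxyFunc()

    for _, raw := range []string{"http://example.com", "http://github.com"} {
        u, _ := url.Parse(raw)
        proxyURL, err := proxyFunc(u)
        // example.com bypasses the proxy (no_proxy); github.com does not.
        fmt.Println(raw, "->", proxyURL, err)
    }
}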
@ -1,13 +0,0 @@
package main

import (
    "context"

    "github.com/actions/actions-runner-controller/github/actions"
)

//go:generate mockery --inpackage --name=RunnerScaleSetClient
type RunnerScaleSetClient interface {
    GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error
    AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error
}

@ -1,343 +0,0 @@
package main

import (
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/prometheus/client_golang/prometheus"
)

// label names
const (
    labelKeyRunnerScaleSetName      = "name"
    labelKeyRunnerScaleSetNamespace = "namespace"
    labelKeyEnterprise              = "enterprise"
    labelKeyOrganization            = "organization"
    labelKeyRepository              = "repository"
    labelKeyJobName                 = "job_name"
    labelKeyJobWorkflowRef          = "job_workflow_ref"
    labelKeyEventName               = "event_name"
    labelKeyJobResult               = "job_result"
)

const githubScaleSetSubsystem = "gha"

// labels
var (
    scaleSetLabels = []string{
        labelKeyRunnerScaleSetName,
        labelKeyRepository,
        labelKeyOrganization,
        labelKeyEnterprise,
        labelKeyRunnerScaleSetNamespace,
    }

    jobLabels = []string{
        labelKeyRepository,
        labelKeyOrganization,
        labelKeyEnterprise,
        labelKeyJobName,
        labelKeyJobWorkflowRef,
        labelKeyEventName,
    }

    completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult)
    jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult)
    startedJobsTotalLabels     = jobLabels
    jobStartupDurationLabels   = []string{
        labelKeyRepository,
        labelKeyOrganization,
        labelKeyEnterprise,
        labelKeyEventName,
    }
)

// metrics
var (
    // availableJobs = prometheus.NewGaugeVec(
    //     prometheus.GaugeOpts{
    //         Subsystem: githubScaleSetSubsystem,
    //         Name:      "available_jobs",
    //         Help:      "Number of jobs with `runs-on` matching the runner scale set name. Jobs are not yet assigned to the runner scale set.",
    //     },
    //     scaleSetLabels,
    // )
    //
    // acquiredJobs = prometheus.NewGaugeVec(
    //     prometheus.GaugeOpts{
    //         Subsystem: githubScaleSetSubsystem,
    //         Name:      "acquired_jobs",
    //         Help:      "Number of jobs acquired by the scale set.",
    //     },
    //     scaleSetLabels,
    // )

    assignedJobs = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "assigned_jobs",
            Help:      "Number of jobs assigned to this scale set.",
        },
        scaleSetLabels,
    )

    runningJobs = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "running_jobs",
            Help:      "Number of jobs running (or about to be run).",
        },
        scaleSetLabels,
    )

    registeredRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "registered_runners",
            Help:      "Number of runners registered by the scale set.",
        },
        scaleSetLabels,
    )

    busyRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "busy_runners",
            Help:      "Number of registered runners running a job.",
        },
        scaleSetLabels,
    )

    minRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "min_runners",
            Help:      "Minimum number of runners.",
        },
        scaleSetLabels,
    )

    maxRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "max_runners",
            Help:      "Maximum number of runners.",
        },
        scaleSetLabels,
    )

    desiredRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "desired_runners",
            Help:      "Number of runners desired by the scale set.",
        },
        scaleSetLabels,
    )

    idleRunners = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "idle_runners",
            Help:      "Number of registered runners not running a job.",
        },
        scaleSetLabels,
    )

    startedJobsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "started_jobs_total",
            Help:      "Total number of jobs started.",
        },
        startedJobsTotalLabels,
    )

    completedJobsTotal = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name:      "completed_jobs_total",
            Help:      "Total number of jobs completed.",
            Subsystem: githubScaleSetSubsystem,
        },
        completedJobsTotalLabels,
    )

    // jobQueueDurationSeconds = prometheus.NewHistogramVec(
    //     prometheus.HistogramOpts{
    //         Subsystem: githubScaleSetSubsystem,
    //         Name:      "job_queue_duration_seconds",
    //         Help:      "Time spent waiting for workflow jobs to get assigned to the scale set after queueing (in seconds).",
    //         Buckets:   runtimeBuckets,
    //     },
    //     jobLabels,
    // )

    jobStartupDurationSeconds = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "job_startup_duration_seconds",
            Help:      "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
            Buckets:   runtimeBuckets,
        },
        jobStartupDurationLabels,
    )

    jobExecutionDurationSeconds = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Subsystem: githubScaleSetSubsystem,
            Name:      "job_execution_duration_seconds",
            Help:      "Time spent executing workflow jobs by the scale set (in seconds).",
            Buckets:   runtimeBuckets,
        },
        jobExecutionDurationLabels,
    )
)

var runtimeBuckets []float64 = []float64{
    0.01, 0.05, 0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
    12, 15, 18, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120,
    150, 180, 210, 240, 300, 360, 420, 480, 540, 600,
    900, 1200, 1800, 2400, 3000, 3600,
}

type metricsExporter struct {
    // Initialized during creation.
    baseLabels
}

type baseLabels struct {
    scaleSetName      string
    scaleSetNamespace string
    enterprise        string
    organization      string
    repository        string
}

func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
    return prometheus.Labels{
        labelKeyEnterprise:     b.enterprise,
        labelKeyOrganization:   b.organization,
        labelKeyRepository:     b.repository,
        labelKeyJobName:        jobBase.JobDisplayName,
        labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
        labelKeyEventName:      jobBase.EventName,
    }
}

func (b *baseLabels) scaleSetLabels() prometheus.Labels {
    return prometheus.Labels{
        labelKeyRunnerScaleSetName:      b.scaleSetName,
        labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
        labelKeyEnterprise:              b.enterprise,
        labelKeyOrganization:            b.organization,
        labelKeyRepository:              b.repository,
    }
}

func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
    l := b.jobLabels(&msg.JobMessageBase)
    l[labelKeyJobResult] = msg.Result
    return l
}

func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
    l := b.jobLabels(&msg.JobMessageBase)
    return l
}

func (b *baseLabels) jobStartupDurationLabels(msg *actions.JobStarted) prometheus.Labels {
    return prometheus.Labels{
        labelKeyEnterprise:   b.enterprise,
        labelKeyOrganization: b.organization,
        labelKeyRepository:   b.repository,
        labelKeyEventName:    msg.EventName,
    }
}

func (m *metricsExporter) withBaseLabels(base baseLabels) {
    m.baseLabels = base
}

func (m *metricsExporter) publishStatic(max, min int) {
    l := m.scaleSetLabels()
    maxRunners.With(l).Set(float64(max))
    minRunners.With(l).Set(float64(min))
}

func (m *metricsExporter) publishStatistics(stats *actions.RunnerScaleSetStatistic) {
    l := m.scaleSetLabels()

    // availableJobs.With(l).Set(float64(stats.TotalAvailableJobs))
    // acquiredJobs.With(l).Set(float64(stats.TotalAcquiredJobs))
    assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
    runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
    registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
    busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
    idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}

func (m *metricsExporter) publishJobStarted(msg *actions.JobStarted) {
    l := m.startedJobLabels(msg)
    startedJobsTotal.With(l).Inc()

    l = m.jobStartupDurationLabels(msg)
    startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
    jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}

// func (m *metricsExporter) publishJobAssigned(msg *actions.JobAssigned) {
//     l := m.jobLabels(&msg.JobMessageBase)
//     queueDuration := msg.JobMessageBase.ScaleSetAssignTime.Unix() - msg.JobMessageBase.QueueTime.Unix()
//     jobQueueDurationSeconds.With(l).Observe(float64(queueDuration))
// }

func (m *metricsExporter) publishJobCompleted(msg *actions.JobCompleted) {
    l := m.completedJobLabels(msg)
    completedJobsTotal.With(l).Inc()

    executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
    jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}

func (m *metricsExporter) publishDesiredRunners(count int) {
    desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
}

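The registry and handler wiring for these collectors lives in the deleted main.go further up. As a self-contained sketch, defining one gauge vec and serving it looks like this; the metric and label names here are illustrative, not the ones registered above:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// demoGauge follows the same NewGaugeVec + label-slice pattern as above.
var demoGauge = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{Subsystem: "gha", Name: "demo_runners", Help: "Example gauge."},
    []string{"name", "namespace"},
)

func main() {
    // A private registry avoids the default Go/process collectors.
    reg := prometheus.NewRegistry()
    reg.MustRegister(demoGauge)
    demoGauge.With(prometheus.Labels{"name": "scale-set", "namespace": "default"}).Set(3)

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
    http.ListenAndServe(":9090", nil)
}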
@ -1,56 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package main

import (
    context "context"

    mock "github.com/stretchr/testify/mock"
)

// MockKubernetesManager is an autogenerated mock type for the KubernetesManager type
type MockKubernetesManager struct {
    mock.Mock
}

// ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
    ret := _m.Called(ctx, namespace, resourceName, runnerCount)

    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, string, string, int) error); ok {
        r0 = rf(ctx, namespace, resourceName, runnerCount)
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
    ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)

    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string, string, int64, int64) error); ok {
        r0 = rf(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// NewMockKubernetesManager creates a new instance of MockKubernetesManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockKubernetesManager(t interface {
    mock.TestingT
    Cleanup(func())
}) *MockKubernetesManager {
    mock := &MockKubernetesManager{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

@ -1,58 +0,0 @@
// Code generated by mockery v2.36.1. DO NOT EDIT.

package main

import (
    context "context"

    actions "github.com/actions/actions-runner-controller/github/actions"

    mock "github.com/stretchr/testify/mock"
)

// MockRunnerScaleSetClient is an autogenerated mock type for the RunnerScaleSetClient type
type MockRunnerScaleSetClient struct {
    mock.Mock
}

// AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
    ret := _m.Called(ctx, requestIds)

    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, []int64) error); ok {
        r0 = rf(ctx, requestIds)
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
    ret := _m.Called(ctx, handler, maxCapacity)

    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error, int) error); ok {
        r0 = rf(ctx, handler, maxCapacity)
    } else {
        r0 = ret.Error(0)
    }

    return r0
}

// NewMockRunnerScaleSetClient creates a new instance of MockRunnerScaleSetClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockRunnerScaleSetClient(t interface {
    mock.TestingT
    Cleanup(func())
}) *MockRunnerScaleSetClient {
    mock := &MockRunnerScaleSetClient{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}

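A hypothetical test showing how the generated constructor above is meant to be used: passing t wires AssertExpectations into t.Cleanup, so no explicit assertion call is needed at the end.

package main

import (
    "context"
    "testing"

    "github.com/stretchr/testify/mock"
)

func TestAcquireJobsOnce(t *testing.T) {
    // The mock fails the test in Cleanup if this expectation goes unmet.
    m := NewMockRunnerScaleSetClient(t)
    m.On("AcquireJobsForRunnerScaleSet", mock.Anything, []int64{1, 2}).Return(nil).Once()

    if err := m.AcquireJobsForRunnerScaleSet(context.Background(), []int64{1, 2}); err != nil {
        t.Fatal(err)
    }
}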
@ -1,127 +0,0 @@
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/go-logr/logr"
    "github.com/pkg/errors"
)

type SessionRefreshingClient struct {
    client  actions.ActionsService
    logger  logr.Logger
    session *actions.RunnerScaleSetSession
}

func newSessionClient(client actions.ActionsService, logger *logr.Logger, session *actions.RunnerScaleSetSession) *SessionRefreshingClient {
    return &SessionRefreshingClient{
        client:  client,
        session: session,
        logger:  logger.WithName("refreshing_client"),
    }
}

func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
    if maxCapacity < 0 {
        return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
    }

    message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
    if err == nil {
        return message, nil
    }

    expiredError := &actions.MessageQueueTokenExpiredError{}
    if !errors.As(err, &expiredError) {
        return nil, fmt.Errorf("get message failed. %w", err)
    }

    m.logger.Info("message queue token is expired during GetNextMessage, refreshing...")
    session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId)
    if err != nil {
        return nil, fmt.Errorf("refresh message session failed. %w", err)
    }

    m.session = session
    message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity)
    if err != nil {
        return nil, fmt.Errorf("delete message failed after refresh message session. %w", err)
    }

    return message, nil
}

func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
    err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
    if err == nil {
        return nil
    }

    expiredError := &actions.MessageQueueTokenExpiredError{}
    if !errors.As(err, &expiredError) {
        return fmt.Errorf("delete message failed. %w", err)
    }

    m.logger.Info("message queue token is expired during DeleteMessage, refreshing...")
    session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId)
    if err != nil {
        return fmt.Errorf("refresh message session failed. %w", err)
    }

    m.session = session
    err = m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
    if err != nil {
        return fmt.Errorf("delete message failed after refresh message session. %w", err)
    }

    return nil
}

func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
    ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
    if err == nil {
        return ids, nil
    }

    expiredError := &actions.MessageQueueTokenExpiredError{}
    if !errors.As(err, &expiredError) {
        return nil, fmt.Errorf("acquire jobs failed. %w", err)
    }

    m.logger.Info("message queue token is expired during AcquireJobs, refreshing...")
    session, err := m.client.RefreshMessageSession(ctx, m.session.RunnerScaleSet.Id, m.session.SessionId)
    if err != nil {
        return nil, fmt.Errorf("refresh message session failed. %w", err)
    }

    m.session = session
    ids, err = m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
    if err != nil {
        return nil, fmt.Errorf("acquire jobs failed after refresh message session. %w", err)
    }

    return ids, nil
}

func (m *SessionRefreshingClient) Close() error {
    if m.session == nil {
        m.logger.Info("session is already deleted. (no-op)")
        return nil
    }

    ctxWithTimeout, cancel := context.WithTimeout(context.Background(), time.Second*30)
    defer cancel()

    m.logger.Info("deleting session.")
    err := m.client.DeleteMessageSession(ctxWithTimeout, m.session.RunnerScaleSet.Id, m.session.SessionId)
    if err != nil {
        return fmt.Errorf("delete message session failed. %w", err)
    }

    m.session = nil
    return nil
}

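All three methods above share one shape: try the call, detect MessageQueueTokenExpiredError, refresh the session once, retry. A generic sketch of that shape; errTokenExpired and the closures are stand-ins, not the actions client API:

package main

import (
    "errors"
    "fmt"
)

var errTokenExpired = errors.New("token expired")

// callWithRefresh retries op exactly once after refreshing credentials,
// mirroring the refresh-on-expiry pattern in SessionRefreshingClient.
func callWithRefresh(op func() error, refresh func() error) error {
    err := op()
    if err == nil || !errors.Is(err, errTokenExpired) {
        return err // success, or a non-expiry error we don't retry
    }
    if rerr := refresh(); rerr != nil {
        return fmt.Errorf("refresh failed: %w", rerr)
    }
    return op() // single retry with fresh credentials
}

func main() {
    calls := 0
    op := func() error {
        calls++
        if calls == 1 {
            return errTokenExpired // first attempt hits an expired token
        }
        return nil
    }
    fmt.Println(callWithRefresh(op, func() error { return nil })) // <nil>
}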
@ -1,421 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/actions/actions-runner-controller/logging"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetMessage(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, nil).Once()
|
||||
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
msg, err := client.GetMessage(ctx, 0, 10)
|
||||
require.NoError(t, err, "GetMessage should not return an error")
|
||||
|
||||
assert.Nil(t, msg, "GetMessage should return nil message")
|
||||
|
||||
msg, err = client.GetMessage(ctx, 0, 10)
|
||||
require.NoError(t, err, "GetMessage should not return an error")
|
||||
|
||||
assert.Equal(t, int64(1), msg.MessageId, "GetMessage should return a message with id 1")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestDeleteMessage(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(nil).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
err := client.DeleteMessage(ctx, int64(1))
|
||||
assert.NoError(t, err, "DeleteMessage should not return an error")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestAcquireJobs(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1}, nil)
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3})
|
||||
assert.NoError(t, err, "AcquireJobs should not return an error")
|
||||
assert.Equal(t, []int64{1}, ids, "AcquireJobs should return a slice with one id")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestClose(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("DeleteMessageSession", mock.Anything, 1, &sessionId).Return(nil).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
err := client.Close()
|
||||
assert.NoError(t, err, "DeleteMessageSession should not return an error")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestGetMessage_Error(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, fmt.Errorf("error")).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
msg, err := client.GetMessage(ctx, 0, 10)
|
||||
assert.ErrorContains(t, err, "get message failed. error", "GetMessage should return an error")
|
||||
assert.Nil(t, msg, "GetMessage should return nil message")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestDeleteMessage_SessionError(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(fmt.Errorf("error")).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
err := client.DeleteMessage(ctx, int64(1))
|
||||
assert.ErrorContains(t, err, "delete message failed. error", "DeleteMessage should return an error")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestAcquireJobs_Error(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, fmt.Errorf("error")).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
|
||||
ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3})
|
||||
assert.ErrorContains(t, err, "acquire jobs failed. error", "AcquireJobs should return an error")
|
||||
assert.Nil(t, ids, "AcquireJobs should return nil ids")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made")
|
||||
}
|
||||
|
||||
func TestGetMessage_RefreshToken(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
|
||||
mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0), 10).Return(&actions.RunnerScaleSetMessage{
|
||||
MessageId: 1,
|
||||
MessageType: "test",
|
||||
Body: "test",
|
||||
}, nil).Once()
|
||||
mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token2",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}, nil).Once()
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
msg, err := client.GetMessage(ctx, 0, 10)
|
||||
assert.NoError(t, err, "Error getting message")
|
||||
assert.Equal(t, int64(1), msg.MessageId, "message id should be updated")
|
||||
assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestDeleteMessage_RefreshSessionToken(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(&actions.MessageQueueTokenExpiredError{}).Once()
|
||||
mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, "token2", int64(1)).Return(nil).Once()
|
||||
mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token2",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
err := client.DeleteMessage(ctx, 1)
|
||||
assert.NoError(t, err, "Error delete message")
|
||||
assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}
|
||||
|
||||
func TestAcquireJobs_RefreshToken(t *testing.T) {
|
||||
mockActionsClient := &actions.MockActionsService{}
|
||||
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
|
||||
logger = logger.WithName(t.Name())
|
||||
require.NoError(t, log_err, "Error creating logger")
|
||||
|
||||
ctx := context.Background()
|
||||
sessionId := uuid.New()
|
||||
session := &actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
OwnerName: "owner",
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}
|
||||
|
||||
mockActionsClient.On("AcquireJobs", ctx, mock.Anything, session.MessageQueueAccessToken, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
|
||||
mockActionsClient.On("AcquireJobs", ctx, mock.Anything, "token2", mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return([]int64{1, 2, 3}, nil)
|
||||
mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(&actions.RunnerScaleSetSession{
|
||||
SessionId: &sessionId,
|
||||
MessageQueueUrl: "https://github.com",
|
||||
MessageQueueAccessToken: "token2",
|
||||
RunnerScaleSet: &actions.RunnerScaleSet{
|
||||
Id: 1,
|
||||
},
|
||||
}, nil)
|
||||
|
||||
client := newSessionClient(mockActionsClient, &logger, session)
|
||||
ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3})
|
||||
assert.NoError(t, err, "Error acquiring jobs")
|
||||
assert.Equal(t, []int64{1, 2, 3}, ids, "Job ids should be returned")
|
||||
assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated")
|
||||
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
|
||||
}

func TestGetMessage_RefreshToken_Failed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, log_err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
	}
	mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
	mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error"))

	client := newSessionClient(mockActionsClient, &logger, session)
	msg, err := client.GetMessage(ctx, 0, 10)
	assert.ErrorContains(t, err, "refresh message session failed. error", "Error should be returned")
	assert.Nil(t, msg, "Message should be nil")
	assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestDeleteMessage_RefreshToken_Failed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, log_err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
	}
	mockActionsClient.On("DeleteMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(1)).Return(&actions.MessageQueueTokenExpiredError{}).Once()
	mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error"))

	client := newSessionClient(mockActionsClient, &logger, session)
	err := client.DeleteMessage(ctx, 1)

	assert.ErrorContains(t, err, "refresh message session failed. error", "Error getting message")
	assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestAcquireJobs_RefreshToken_Failed(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, log_err, "Error creating logger")

	ctx := context.Background()
	sessionId := uuid.New()
	session := &actions.RunnerScaleSetSession{
		SessionId:               &sessionId,
		OwnerName:               "owner",
		MessageQueueUrl:         "https://github.com",
		MessageQueueAccessToken: "token",
		RunnerScaleSet: &actions.RunnerScaleSet{
			Id: 1,
		},
	}

	mockActionsClient.On("AcquireJobs", ctx, mock.Anything, session.MessageQueueAccessToken, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 && ids[1] == 2 && ids[2] == 3 })).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once()
	mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error"))

	client := newSessionClient(mockActionsClient, &logger, session)
	ids, err := client.AcquireJobs(ctx, []int64{1, 2, 3})
	assert.ErrorContains(t, err, "refresh message session failed. error", "Expect error refreshing message session")
	assert.Nil(t, ids, "Job ids should be nil")
	assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}

func TestClose_Skip(t *testing.T) {
	mockActionsClient := &actions.MockActionsService{}
	logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
	logger = logger.WithName(t.Name())
	require.NoError(t, log_err, "Error creating logger")

	client := newSessionClient(mockActionsClient, &logger, nil)
	err := client.Close()
	require.NoError(t, err, "Error closing session client")
	assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
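
Every controller hunk below makes the same mechanical change: fmt.Errorf with %v becomes %w. The difference is visible to callers: %v flattens the cause into text, while %w keeps it on the unwrap chain, so errors.Is and errors.As still recognize the original error. A tiny self-contained demonstration:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func main() {
	flattened := fmt.Errorf("failed to get listener pods: %v", errNotFound)
	wrapped := fmt.Errorf("failed to get listener pods: %w", errNotFound)

	fmt.Println(errors.Is(flattened, errNotFound)) // false: the cause survives only as text
	fmt.Println(errors.Is(wrapped, errNotFound))   // true: %w preserves the unwrap chain
}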
@@ -284,15 +284,14 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener pod")
 			if err := r.Delete(ctx, listenerPod); err != nil {
-				return false, fmt.Errorf("failed to delete listener pod: %v", err)
+				return false, fmt.Errorf("failed to delete listener pod: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener pods: %v", err)
-
-	default: // NOT FOUND
+	case kerrors.IsNotFound(err):
 		_ = r.publishRunningListener(autoscalingListener, false) // If error is returned, we never published metrics so it is safe to ignore
+	default:
+		return false, fmt.Errorf("failed to get listener pods: %w", err)
 	}
 	logger.Info("Listener pod is deleted")
 
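Besides the %w switch, the hunk above reshapes the error handling after the Get: the old code matched "a real error" first and hid the not-found path behind a commented default, while the new code names kerrors.IsNotFound as its own case and lets default carry the unexpected errors. A minimal sketch of the resulting pattern — kerrors is the real k8s.io/apimachinery/pkg/api/errors package, everything else here is illustrative:

package sketch

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
)

// cleanup shows the reshaped switch: handle "still exists" and "already gone"
// explicitly, and make default the unexpected-error path.
func cleanup(get func() error, onGone func()) (bool, error) {
	switch err := get(); {
	case err == nil:
		// Object still exists: delete it and requeue (elided in this sketch).
		return false, nil
	case kerrors.IsNotFound(err):
		onGone() // already deleted: run the "gone" bookkeeping
		return true, nil
	default:
		return false, fmt.Errorf("failed to get object: %w", err)
	}
}

The hunks that follow apply the same %w change, and a simpler version of the same case cleanup, to the listener's secrets, roles, and service account.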
@@ -303,12 +302,12 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if secret.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener config secret")
 			if err := r.Delete(ctx, &secret); err != nil {
-				return false, fmt.Errorf("failed to delete listener config secret: %v", err)
+				return false, fmt.Errorf("failed to delete listener config secret: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener config secret: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener config secret: %w", err)
 	}
 
 	if autoscalingListener.Spec.Proxy != nil {
@@ -320,12 +319,12 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener proxy secret")
 			if err := r.Delete(ctx, proxySecret); err != nil {
-				return false, fmt.Errorf("failed to delete listener proxy secret: %v", err)
+				return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener proxy secret: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener proxy secret: %w", err)
 	}
 	logger.Info("Listener proxy secret is deleted")
 }
@@ -337,12 +336,12 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener role binding")
 			if err := r.Delete(ctx, listenerRoleBinding); err != nil {
-				return false, fmt.Errorf("failed to delete listener role binding: %v", err)
+				return false, fmt.Errorf("failed to delete listener role binding: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener role binding: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener role binding: %w", err)
 	}
 	logger.Info("Listener role binding is deleted")
 
@@ -353,12 +352,12 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener role")
 			if err := r.Delete(ctx, listenerRole); err != nil {
-				return false, fmt.Errorf("failed to delete listener role: %v", err)
+				return false, fmt.Errorf("failed to delete listener role: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener role: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener role: %w", err)
 	}
 	logger.Info("Listener role is deleted")
 
@@ -370,12 +369,12 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 		if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener service account")
 			if err := r.Delete(ctx, listenerSa); err != nil {
-				return false, fmt.Errorf("failed to delete listener service account: %v", err)
+				return false, fmt.Errorf("failed to delete listener service account: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener service account: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener service account: %w", err)
 	}
 	logger.Info("Listener service account is deleted")
 
@@ -447,7 +446,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 		var err error
 		cert, err = r.certificate(ctx, autoscalingRunnerSet, autoscalingListener)
 		if err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %v", err)
+			return ctrl.Result{}, fmt.Errorf("failed to create certificate env var for listener: %w", err)
 		}
 	}
 
@@ -14,7 +14,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
-	listenerconfig "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
+	listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -335,12 +335,12 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
 		if listener.ObjectMeta.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener")
 			if err := r.Delete(ctx, &listener); err != nil {
-				return false, fmt.Errorf("failed to delete listener: %v", err)
+				return false, fmt.Errorf("failed to delete listener: %w", err)
 			}
 		}
 		return false, nil
-	case err != nil && !kerrors.IsNotFound(err):
-		return false, fmt.Errorf("failed to get listener: %v", err)
+	case !kerrors.IsNotFound(err):
+		return false, fmt.Errorf("failed to get listener: %w", err)
 	}
 
 	logger.Info("Listener is deleted")
@@ -351,7 +351,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
 	logger.Info("Cleaning up ephemeral runner sets")
 	runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
 	if err != nil {
-		return false, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
+		return false, fmt.Errorf("failed to list ephemeral runner sets: %w", err)
 	}
 	if runnerSets.empty() {
 		logger.Info("All ephemeral runner sets are deleted")
@@ -360,7 +360,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
 
 	logger.Info("Deleting all ephemeral runner sets", "count", runnerSets.count())
 	if err := r.deleteEphemeralRunnerSets(ctx, runnerSets.all(), logger); err != nil {
-		return false, fmt.Errorf("failed to delete ephemeral runner sets: %v", err)
+		return false, fmt.Errorf("failed to delete ephemeral runner sets: %w", err)
 	}
 	return false, nil
 }
@@ -375,7 +375,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 		}
 		logger.Info("Deleting ephemeral runner set", "name", rs.Name)
 		if err := r.Delete(ctx, rs); err != nil {
-			return fmt.Errorf("failed to delete EphemeralRunnerSet resource: %v", err)
+			return fmt.Errorf("failed to delete EphemeralRunnerSet resource: %w", err)
 		}
 		logger.Info("Deleted ephemeral runner set", "name", rs.Name)
 	}
@@ -670,7 +670,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
 	list := new(v1alpha1.EphemeralRunnerSetList)
 	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
-		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
+		return nil, fmt.Errorf("failed to list ephemeral runner sets: %w", err)
 	}
 
 	return &EphemeralRunnerSets{list: list}, nil
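One detail that is easy to miss in the hunk above: client.MatchingFields{resourceOwnerKey: ...} only works because a matching field index was registered with the manager at startup; against the cached client, a List with an unregistered field key fails. A compilable sketch of how such an owner index is typically registered with controller-runtime — the index key value and the extractor function are assumptions for illustration, not the repository's exact setup code:

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

const resourceOwnerKey = ".metadata.controller" // assumed key; the real value lives in the controllers package

// registerOwnerIndex makes client.MatchingFields{resourceOwnerKey: ownerName}
// usable by indexing every object under the name of its controller owner.
func registerOwnerIndex(ctx context.Context, mgr manager.Manager, obj client.Object) error {
	return mgr.GetFieldIndexer().IndexField(ctx, obj, resourceOwnerKey, func(o client.Object) []string {
		owner := metav1.GetControllerOf(o)
		if owner == nil {
			return nil
		}
		return []string{owner.Name}
	})
}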
@@ -814,7 +814,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 		}
 		c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
 		return
-	case err != nil && !kerrors.IsNotFound(err):
+	case !kerrors.IsNotFound(err):
 		c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
 		return
 	default:
@@ -856,11 +856,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 		}
 		c.logger.Info("Removed finalizer from container mode kubernetes role")
 		return
-	case err != nil && !kerrors.IsNotFound(err):
-		c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
+	case kerrors.IsNotFound(err):
+		c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
 		return
 	default:
-		c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
+		c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
 		return
 	}
 }
@@ -899,11 +899,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
 		}
 		c.logger.Info("Removed finalizer from container mode kubernetes service account")
 		return
-	case err != nil && !kerrors.IsNotFound(err):
-		c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
+	case kerrors.IsNotFound(err):
+		c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
 		return
 	default:
-		c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
+		c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
 		return
 	}
 }
@@ -942,11 +942,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
 		}
 		c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
 		return
-	case err != nil && !kerrors.IsNotFound(err):
-		c.err = fmt.Errorf("failed to fetch service account: %w", err)
+	case kerrors.IsNotFound(err):
+		c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
 		return
 	default:
-		c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
+		c.err = fmt.Errorf("failed to fetch service account: %w", err)
 		return
 	}
 }
@@ -985,11 +985,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
 		}
 		c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
 		return
-	case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
-		c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
+	case kerrors.IsNotFound(err) || kerrors.IsForbidden(err):
+		c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
		return
 	default:
-		c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
+		c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
 		return
 	}
 }
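The GitHub secret hunk above is the one finalizer-cleanup switch with an extra predicate: a fetch that comes back Forbidden is treated the same as NotFound, i.e. as "already gone" (plausibly because access can be revoked while resources are being torn down; the diff itself does not say). Both predicates come from the real k8s.io/apimachinery/pkg/api/errors package; a small runnable check of how they classify typical API errors:

package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// alreadyGone mirrors the combined predicate in the hunk above.
func alreadyGone(err error) bool {
	return kerrors.IsNotFound(err) || kerrors.IsForbidden(err)
}

func main() {
	gr := schema.GroupResource{Resource: "secrets"}
	fmt.Println(alreadyGone(kerrors.NewNotFound(gr, "github-secret")))       // true
	fmt.Println(alreadyGone(kerrors.NewForbidden(gr, "github-secret", nil))) // true
	fmt.Println(alreadyGone(kerrors.NewBadRequest("malformed request")))     // false
}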
@@ -1028,11 +1028,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
 		}
 		c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
 		return
-	case err != nil && !kerrors.IsNotFound(err):
-		c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
+	case kerrors.IsNotFound(err):
+		c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
 		return
 	default:
-		c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
+		c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
 		return
 	}
 }
@@ -1071,11 +1071,11 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
 		}
 		c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
 		return
-	case err != nil && !kerrors.IsNotFound(err):
-		c.err = fmt.Errorf("failed to fetch manager role: %w", err)
+	case kerrors.IsNotFound(err):
+		c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
 		return
 	default:
-		c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
+		c.err = fmt.Errorf("failed to fetch manager role: %w", err)
 		return
 	}
 }
@@ -344,7 +344,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 		if pod.ObjectMeta.DeletionTimestamp.IsZero() {
 			log.Info("Deleting the runner pod")
 			if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
-				return false, fmt.Errorf("failed to delete pod: %v", err)
+				return false, fmt.Errorf("failed to delete pod: %w", err)
 			}
 		}
 		return false, nil
@@ -361,7 +361,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 		if secret.ObjectMeta.DeletionTimestamp.IsZero() {
 			log.Info("Deleting the jitconfig secret")
 			if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
-				return false, fmt.Errorf("failed to delete secret: %v", err)
+				return false, fmt.Errorf("failed to delete secret: %w", err)
 			}
 		}
 		return false, nil
@@ -377,7 +377,7 @@ func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.C
 	log.Info("Cleaning up runner linked pods")
 	done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
 	if err != nil {
-		return false, fmt.Errorf("failed to clean up runner linked pods: %v", err)
+		return false, fmt.Errorf("failed to clean up runner linked pods: %w", err)
 	}
 
 	if !done {
@@ -402,7 +402,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
 	var runnerLinkedPodList corev1.PodList
 	err = r.List(ctx, &runnerLinkedPodList, client.InNamespace(ephemeralRunner.Namespace), runnerLinedLabels)
 	if err != nil {
-		return false, fmt.Errorf("failed to list runner-linked pods: %v", err)
+		return false, fmt.Errorf("failed to list runner-linked pods: %w", err)
 	}
 
 	if len(runnerLinkedPodList.Items) == 0 {
@@ -421,7 +421,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
 
 		log.Info("Deleting container hooks runner-linked pod", "name", linkedPod.Name)
 		if err := r.Delete(ctx, linkedPod); err != nil && !kerrors.IsNotFound(err) {
-			errs = append(errs, fmt.Errorf("failed to delete runner linked pod %q: %v", linkedPod.Name, err))
+			errs = append(errs, fmt.Errorf("failed to delete runner linked pod %q: %w", linkedPod.Name, err))
 		}
 	}
 
@@ -456,7 +456,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
 
 		log.Info("Deleting container hooks runner-linked secret", "name", s.Name)
 		if err := r.Delete(ctx, s); err != nil && !kerrors.IsNotFound(err) {
-			errs = append(errs, fmt.Errorf("failed to delete runner linked secret %q: %v", s.Name, err))
+			errs = append(errs, fmt.Errorf("failed to delete runner linked secret %q: %w", s.Name, err))
 		}
 	}
 
@@ -470,12 +470,12 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
 		obj.Status.Reason = reason
 		obj.Status.Message = errMessage
 	}); err != nil {
-		return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
+		return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %w", err)
 	}
 
 	log.Info("Removing the runner from the service")
 	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
-		return fmt.Errorf("failed to remove the runner from service: %v", err)
+		return fmt.Errorf("failed to remove the runner from service: %w", err)
 	}
 
 	log.Info("EphemeralRunner is marked as Failed and deleted from the service")
@@ -487,7 +487,7 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
 	if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
 		obj.Status.Phase = corev1.PodSucceeded
 	}); err != nil {
-		return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err)
+		return fmt.Errorf("failed to update ephemeral runner with status finished: %w", err)
 	}
 
 	log.Info("EphemeralRunner status is marked as Finished")
@@ -500,7 +500,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
 	if pod.ObjectMeta.DeletionTimestamp.IsZero() {
 		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
 		if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
-			return fmt.Errorf("failed to delete pod with status failed: %v", err)
+			return fmt.Errorf("failed to delete pod with status failed: %w", err)
 		}
 	}
 
@@ -514,7 +514,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
 		obj.Status.Reason = pod.Status.Reason
 		obj.Status.Message = pod.Status.Message
 	}); err != nil {
-		return fmt.Errorf("failed to update ephemeral runner status: failed attempts: %v", err)
+		return fmt.Errorf("failed to update ephemeral runner status: failed attempts: %w", err)
 	}
 
 	log.Info("EphemeralRunner pod is deleted and status is updated with failure count")
@@ -528,7 +528,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 	log.Info("Creating ephemeral runner JIT config")
 	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
 	if err != nil {
-		return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
 	}
 
 	jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
@@ -546,12 +546,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 	if err != nil {
 		actionsError := &actions.ActionsError{}
 		if !errors.As(err, &actionsError) {
-			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
 		}
 
 		if actionsError.StatusCode != http.StatusConflict ||
 			!actionsError.IsException("AgentExistsException") {
-			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
 		}
 
 		// If the runner with the name we want already exists it means:
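This hunk is where %w pays off most directly: the code itself pulls a typed *actions.ActionsError back out of err with errors.As, and returning %w means callers above this function can keep doing the same. A self-contained demonstration with a stand-in type (apiError is hypothetical; it only mimics the shape of actions.ActionsError):

package main

import (
	"errors"
	"fmt"
)

// apiError is a hypothetical stand-in for actions.ActionsError.
type apiError struct{ StatusCode int }

func (e *apiError) Error() string { return fmt.Sprintf("api error: status %d", e.StatusCode) }

func main() {
	cause := &apiError{StatusCode: 409}
	wrapped := fmt.Errorf("failed to generate JIT config with Actions service error: %w", cause)

	var ae *apiError
	if errors.As(wrapped, &ae) {
		fmt.Println("typed cause survives wrapping, status:", ae.StatusCode) // status: 409
	}
}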
@@ -564,7 +564,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
 		existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
 		if err != nil {
-			return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %w", err)
 		}
 
 		if existingRunner == nil {
@@ -577,7 +577,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 			log.Info("Removing the runner with the same name")
 			err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
 			if err != nil {
-				return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
+				return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %w", err)
 			}
 
 			log.Info("Removed the runner with the same name, re-queuing the reconciliation")
@@ -586,7 +586,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 
 		// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
 		// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
-		return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %w", err)
 	}
 	log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
 
@@ -597,7 +597,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
 	})
 	if err != nil {
-		return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
 	}
 
 	// We want to continue without a requeue for faster pod creation.
@@ -691,12 +691,12 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
 	jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
 
 	if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
-		return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
 	}
 
 	log.Info("Created new secret spec for ephemeral runner")
 	if err := r.Create(ctx, jitSecret); err != nil {
-		return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %w", err)
 	}
 
 	log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
@@ -743,7 +743,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
 		obj.Status.Message = pod.Status.Message
 	})
 	if err != nil {
-		return fmt.Errorf("failed to update runner status for Phase/Reason/Message/Ready: %v", err)
+		return fmt.Errorf("failed to update runner status for Phase/Reason/Message/Ready: %w", err)
 	}
 
 	log.Info("Updated ephemeral runner status")
@@ -835,7 +835,7 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
 
 		if actionsError.StatusCode != http.StatusNotFound ||
 			!actionsError.IsException("AgentNotFoundException") {
-			return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err)
+			return false, fmt.Errorf("failed to check if runner exists in GitHub service: %w", err)
 		}
 
 		log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
@@ -849,7 +849,7 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
 func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
 	client, err := r.actionsClientFor(ctx, ephemeralRunner)
 	if err != nil {
-		return fmt.Errorf("failed to get actions client for runner: %v", err)
+		return fmt.Errorf("failed to get actions client for runner: %w", err)
 	}
 
 	log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerId)
@@ -275,7 +275,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
 	proxySecret.Name = proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet)
 
 	if err := r.Delete(ctx, proxySecret); err != nil && !kerrors.IsNotFound(err) {
-		return fmt.Errorf("failed to delete proxy secret: %v", err)
+		return fmt.Errorf("failed to delete proxy secret: %w", err)
 	}
 
 	log.Info("Deleted proxy secret")
@@ -287,7 +287,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
 	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
 	err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
 	if err != nil {
-		return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
+		return false, fmt.Errorf("failed to list child ephemeral runners: %w", err)
 	}
 
 	log.Info("Actual Ephemeral runner counts", "count", len(ephemeralRunnerList.Items))
@@ -441,7 +441,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
 	}
 	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
 	if err != nil {
-		return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %v", err)
+		return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err)
 	}
 	var errs []error
 	deletedCount := 0
@@ -12,7 +12,7 @@ import (
 
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
 	"github.com/actions/actions-runner-controller/build"
-	listenerconfig "github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
+	listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/actions/actions-runner-controller/hash"
 	"github.com/actions/actions-runner-controller/logging"