Introduce new helm charts for the preview auto-scaling mode for ARC. (#2168)

Tingluo Huang 2023-01-17 14:36:04 -05:00 committed by GitHub
parent c4d3cff3df
commit 0324658a3f
30 changed files with 15189 additions and 0 deletions


@@ -116,6 +116,10 @@ manifests-gen-crds: controller-gen yq
chart-crds:
cp config/crd/bases/*.yaml charts/actions-runner-controller/crds/
cp config/crd/bases/actions.github.com_autoscalingrunnersets.yaml charts/actions-runner-controller-2/crds/
cp config/crd/bases/actions.github.com_autoscalinglisteners.yaml charts/actions-runner-controller-2/crds/
cp config/crd/bases/actions.github.com_ephemeralrunnersets.yaml charts/actions-runner-controller-2/crds/
cp config/crd/bases/actions.github.com_ephemeralrunners.yaml charts/actions-runner-controller-2/crds/
rm charts/actions-runner-controller/crds/actions.github.com_autoscalingrunnersets.yaml
rm charts/actions-runner-controller/crds/actions.github.com_autoscalinglisteners.yaml
rm charts/actions-runner-controller/crds/actions.github.com_ephemeralrunnersets.yaml


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,33 @@
apiVersion: v2
name: actions-runner-controller-2
description: A Helm chart for installing actions-runner-controller CRDs
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "preview"
home: https://github.com/actions/actions-runner-controller
sources:
- "https://github.com/actions/actions-runner-controller"
maintainers:
- name: actions
url: https://github.com/actions
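For reference, the helpers and chart tests added later in this commit surface these two version fields in the rendered common labels roughly as follows (a sketch of the output for the 0.1.0 / "preview" combination above):

helm.sh/chart: actions-runner-controller-2-0.1.0
app.kubernetes.io/version: "preview"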


@@ -0,0 +1,97 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: autoscalinglisteners.actions.github.com
spec:
group: actions.github.com
names:
kind: AutoscalingListener
listKind: AutoscalingListenerList
plural: autoscalinglisteners
singular: autoscalinglistener
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.githubConfigUrl
name: GitHub Configure URL
type: string
- jsonPath: .spec.autoscalingRunnerSetNamespace
name: AutoscalingRunnerSet Namespace
type: string
- jsonPath: .spec.autoscalingRunnerSetName
name: AutoscalingRunnerSet Name
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: AutoscalingListener is the Schema for the autoscalinglisteners API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AutoscalingListenerSpec defines the desired state of AutoscalingListener
properties:
autoscalingRunnerSetName:
description: Required
type: string
autoscalingRunnerSetNamespace:
description: Required
type: string
ephemeralRunnerSetName:
description: Required
type: string
githubConfigSecret:
description: Required
type: string
githubConfigUrl:
description: Required
type: string
image:
description: Required
type: string
imagePullSecrets:
description: Required
items:
description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
properties:
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
type: object
type: array
maxRunners:
description: Required
minimum: 0
type: integer
minRunners:
description: Required
minimum: 0
type: integer
runnerScaleSetId:
description: Required
type: integer
type: object
status:
description: AutoscalingListenerStatus defines the observed state of AutoscalingListener
type: object
type: object
served: true
storage: true
subresources:
status: {}
preserveUnknownFields: false
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
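To make the schema above concrete, a conforming AutoscalingListener resource would look roughly like the sketch below. It is purely illustrative: in this auto-scaling mode the controller creates listener resources itself, and every name, URL and ID shown here is made up.

apiVersion: actions.github.com/v1alpha1
kind: AutoscalingListener
metadata:
  name: example-listener
  namespace: arc-system
spec:
  githubConfigUrl: https://github.com/myorg/myrepo
  githubConfigSecret: example-github-secret
  autoscalingRunnerSetNamespace: arc-runners
  autoscalingRunnerSetName: example-runner-set
  ephemeralRunnerSetName: example-runner-set-abcde
  runnerScaleSetId: 1
  image: ghcr.io/example/listener:preview
  minRunners: 0
  maxRunners: 5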

File diff suppressed because it is too large


@@ -0,0 +1,3 @@
Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.


@@ -0,0 +1,97 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "actions-runner-controller-2.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "actions-runner-controller-2.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "actions-runner-controller-2.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "actions-runner-controller-2.labels" -}}
helm.sh/chart: {{ include "actions-runner-controller-2.chart" . }}
{{ include "actions-runner-controller-2.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: {{ .Chart.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "actions-runner-controller-2.selectorLabels" -}}
app.kubernetes.io/name: {{ include "actions-runner-controller-2.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "actions-runner-controller-2.serviceAccountName" -}}
{{- if eq .Values.serviceAccount.name "default"}}
{{- fail "serviceAccount.name cannot be set to 'default'" }}
{{- end }}
{{- if .Values.serviceAccount.create }}
{{- default (include "actions-runner-controller-2.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- if not .Values.serviceAccount.name }}
{{- fail "serviceAccount.name must be set if serviceAccount.create is false" }}
{{- else }}
{{- .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{- end }}
{{- define "actions-runner-controller-2.managerRoleName" -}}
{{- include "actions-runner-controller-2.fullname" . }}-manager-role
{{- end }}
{{- define "actions-runner-controller-2.managerRoleBinding" -}}
{{- include "actions-runner-controller-2.fullname" . }}-manager-rolebinding
{{- end }}
{{- define "actions-runner-controller-2.leaderElectionRoleName" -}}
{{- include "actions-runner-controller-2.fullname" . }}-leader-election-role
{{- end }}
{{- define "actions-runner-controller-2.leaderElectionRoleBinding" -}}
{{- include "actions-runner-controller-2.fullname" . }}-leader-election-rolebinding
{{- end }}
{{- define "actions-runner-controller-2.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}
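The imagePullSecretsNames helper simply joins the name fields of whatever list it is handed. For example, the values fragment below (the same secret names the chart tests use) renders as the string "dockerhub,ghcr", which the deployment template passes to the --auto-scaler-image-pull-secrets flag:

imagePullSecrets:
  - name: dockerhub
  - name: ghcr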


@@ -0,0 +1,98 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "actions-runner-controller-2.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller-2.labels" . | nindent 4 }}
spec:
replicas: {{ default 1 .Values.replicaCount }}
selector:
matchLabels:
{{- include "actions-runner-controller-2.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: "manager"
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
app.kubernetes.io/part-of: actions-runner-controller
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "actions-runner-controller-2.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "actions-runner-controller-2.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.priorityClassName }}
priorityClassName: "{{ . }}"
{{- end }}
containers:
- name: manager
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "--auto-scaling-runner-set-only"
{{- if gt (int (default 1 .Values.replicaCount)) 1 }}
- "--enable-leader-election"
- "--leader-election-id={{ include "actions-runner-controller-2.fullname" . }}"
{{- end }}
{{- with .Values.imagePullSecrets }}
- "--auto-scaler-image-pull-secrets={{ include "actions-runner-controller-2.imagePullSecretsNames" . }}"
{{- end }}
command:
- "/manager"
env:
- name: CONTROLLER_MANAGER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: CONTROLLER_MANAGER_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.env }}
{{- if kindIs "slice" .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp
terminationGracePeriodSeconds: 10
volumes:
- name: tmp
emptyDir: {}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
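One detail worth calling out: the manager container accepts .Values.env either as a list of full env entries or as a plain key/value map (the kindIs "slice" branch above). Both fragments below are sketches using an illustrative variable name:

env:
  - name: HTTP_PROXY
    value: "http://proxy.example.com:8080"

or, equivalently, as a map:

env:
  HTTP_PROXY: "http://proxy.example.com:8080"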


@@ -0,0 +1,12 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end }}


@@ -0,0 +1,15 @@
{{- if gt (int (default 1 .Values.replicaCount)) 1 -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "actions-runner-controller-2.leaderElectionRoleBinding" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "actions-runner-controller-2.leaderElectionRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -0,0 +1,250 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "actions-runner-controller-2.managerRoleName" . }}
rules:
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/finalizers
verbs:
- update
- apiGroups:
- actions.github.com
resources:
- autoscalingrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- autoscalinglisteners/finalizers
verbs:
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunners/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- namespaces
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- namespaces/status
- pods/status
verbs:
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- create
- delete
- get
- update
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
verbs:
- create
- delete
- get
- update
- list
- watch
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- get
- apiGroups:
- ""
resources:
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- get
- list
- create
- delete


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "actions-runner-controller-2.managerRoleBinding" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "actions-runner-controller-2.managerRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}


@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "actions-runner-controller-2.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller-2.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
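Taken together with the serviceAccountName helper, the service account can be configured in one of two ways, sketched below with an illustrative name; setting the name to "default", or leaving it empty while create is false, makes rendering fail:

serviceAccount:
  create: true
  annotations: {}

or, to reuse an existing account:

serviceAccount:
  create: false
  name: my-existing-sa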


@@ -0,0 +1,508 @@
package tests
import (
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
func TestTemplate_CreateServiceAccount(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.annotations.foo": "bar",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-arc-actions-runner-controller-2", serviceAccount.Name)
assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
}
func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.name": "overwritten-name",
"serviceAccount.annotations.foo": "bar",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "overwritten-name", serviceAccount.Name)
assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
}
func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.name": "default",
"serviceAccount.annotations.foo": "bar",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
assert.ErrorContains(t, err, "serviceAccount.name cannot be set to 'default'", "We should get an error because the default service account cannot be used")
}
func TestTemplate_NotCreateServiceAccount(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "false",
"serviceAccount.name": "overwritten-name",
"serviceAccount.annotations.foo": "bar",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/serviceaccount.yaml"})
assert.ErrorContains(t, err, "could not find template templates/serviceaccount.yaml in chart", "We should get an error because the template should be skipped")
}
func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "false",
"serviceAccount.annotations.foo": "bar",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
assert.ErrorContains(t, err, "serviceAccount.name must be set if serviceAccount.create is false", "We should get an error because the default service account cannot be used")
}
func TestTemplate_CreateManagerRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role.yaml"})
var managerRole rbacv1.ClusterRole
helm.UnmarshalK8SYaml(t, output, &managerRole)
assert.Empty(t, managerRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRole.Name)
assert.Equal(t, 25, len(managerRole.Rules))
}
func TestTemplate_ManagerRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"serviceAccount.create": "true",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_role_binding.yaml"})
var managerRoleBinding rbacv1.ClusterRoleBinding
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)
assert.Empty(t, managerRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-rolebinding", managerRoleBinding.Name)
assert.Equal(t, "test-arc-actions-runner-controller-2-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-actions-runner-controller-2", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerRoleBinding.Subjects[0].Namespace)
}
func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"image.tag": "dev",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name)
assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"])
assert.Equal(t, "actions-runner-controller-2", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
assert.Equal(t, "actions-runner-controller-2", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 0)
assert.Nil(t, deployment.Spec.Template.Spec.Affinity)
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 0)
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 1)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
}
func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"labels.foo": "bar",
"labels.github": "actions",
"replicaCount": "1",
"image.pullPolicy": "Always",
"image.tag": "dev",
"imagePullSecrets[0].name": "dockerhub",
"nameOverride": "actions-runner-controller-2-override",
"fullnameOverride": "actions-runner-controller-2-fullname-override",
"serviceAccount.name": "actions-runner-controller-2-sa",
"podAnnotations.foo": "bar",
"podSecurityContext.fsGroup": "1000",
"securityContext.runAsUser": "1000",
"securityContext.runAsNonRoot": "true",
"resources.limits.cpu": "500m",
"nodeSelector.foo": "bar",
"tolerations[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
"priorityClassName": "test-priority-class",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "actions-runner-controller-2-fullname-override", deployment.Name)
assert.Equal(t, "actions-runner-controller-2-0.1.0", deployment.Labels["helm.sh/chart"])
assert.Equal(t, "actions-runner-controller-2-override", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "preview", deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, "bar", deployment.Labels["foo"])
assert.Equal(t, "actions", deployment.Labels["github"])
assert.Equal(t, int32(1), *deployment.Spec.Replicas)
assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])
assert.Equal(t, "actions-runner-controller-2-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])
assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
assert.Equal(t, "actions-runner-controller-2-sa", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
assert.Len(t, deployment.Spec.Template.Spec.Volumes, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, deployment.Spec.Template.Spec.Volumes[0].EmptyDir)
assert.Len(t, deployment.Spec.Template.Spec.NodeSelector, 1)
assert.Equal(t, "bar", deployment.Spec.Template.Spec.NodeSelector["foo"])
assert.NotNil(t, deployment.Spec.Template.Spec.Affinity.NodeAffinity)
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
assert.Equal(t, "bar", string(deployment.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator))
assert.Len(t, deployment.Spec.Template.Spec.Tolerations, 1)
assert.Equal(t, "foo", deployment.Spec.Template.Spec.Tolerations[0].Key)
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullAlways, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAME", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "metadata.name", deployment.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
assert.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot)
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
assert.Equal(t, "tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
}
func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role.yaml"})
var leaderRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &leaderRole)
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRole.Name)
assert.Equal(t, namespaceName, leaderRole.Namespace)
}
func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"replicaCount": "2",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/leader_election_role_binding.yaml"})
var leaderRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding)
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-rolebinding", leaderRoleBinding.Name)
assert.Equal(t, namespaceName, leaderRoleBinding.Namespace)
assert.Equal(t, "test-arc-actions-runner-controller-2-leader-election-role", leaderRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-actions-runner-controller-2", leaderRoleBinding.Subjects[0].Name)
}
func TestTemplate_EnableLeaderElection(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"replicaCount": "2",
"image.tag": "dev",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-actions-runner-controller-2", deployment.Name)
assert.Equal(t, int32(2), *deployment.Spec.Replicas)
assert.Len(t, deployment.Spec.Template.Spec.Containers, 1)
assert.Equal(t, "manager", deployment.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner-controller-2:dev", deployment.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, corev1.PullIfNotPresent, deployment.Spec.Template.Spec.Containers[0].ImagePullPolicy)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--leader-election-id=test-arc-actions-runner-controller-2", deployment.Spec.Template.Spec.Containers[0].Args[2])
}
func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../actions-runner-controller-2")
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"imagePullSecrets[0].name": "dockerhub",
"imagePullSecrets[1].name": "ghcr",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, namespaceName, deployment.Namespace)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
}


@@ -0,0 +1,65 @@
# Default values for actions-runner-controller-2.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
labels: {}
# Leader election is enabled when replicaCount > 1, so that only one replica
# is in charge of reconciliation at any given time.
# The leader election ID is set to the chart fullname (the "actions-runner-controller-2.fullname" template).
replicaCount: 1
image:
repository: "ghcr.io/actions/actions-runner-controller-2"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created for running the controller pod
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# You can not use the default service account for this.
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chance the chart runs in environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Leverage a PriorityClass to ensure your pods survive resource shortages
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# PriorityClass: system-cluster-critical
priorityClassName: ""
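As a sketch, an override file for this chart could look like the following (all values illustrative). Raising replicaCount above 1 is what enables the leader-election Role, RoleBinding and the --enable-leader-election/--leader-election-id flags in the templates above:

replicaCount: 2
image:
  repository: "ghcr.io/actions/actions-runner-controller-2"
  tag: ""  # an empty tag falls back to the chart appVersion, "preview"
imagePullSecrets:
  - name: dockerhub
resources:
  limits:
    cpu: 500m
    memory: 128Mi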


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,33 @@
apiVersion: v2
name: auto-scaling-runner-set
description: A Helm chart for deploying an AutoScalingRunnerSet
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.1.0"
home: https://github.com/actions/dev-arc
sources:
- "https://github.com/actions/dev-arc"
maintainers:
- name: actions
url: https://github.com/actions


@@ -0,0 +1,6 @@
# Set the following to dummy values.
# This is only useful in CI
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test


@@ -0,0 +1,3 @@
Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.


@@ -0,0 +1,313 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "auto-scaling-runner-set.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "auto-scaling-runner-set.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "auto-scaling-runner-set.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "auto-scaling-runner-set.labels" -}}
helm.sh/chart: {{ include "auto-scaling-runner-set.chart" . }}
{{ include "auto-scaling-runner-set.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "auto-scaling-runner-set.selectorLabels" -}}
app.kubernetes.io/name: {{ include "auto-scaling-runner-set.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "auto-scaling-runner-set.githubsecret" -}}
{{- include "auto-scaling-runner-set.fullname" . }}-github-secret
{{- end }}
{{- define "auto-scaling-runner-set.noPermissionServiceAccountName" -}}
{{- include "auto-scaling-runner-set.fullname" . }}-no-permission-service-account
{{- end }}
{{- define "auto-scaling-runner-set.kubeModeRoleName" -}}
{{- include "auto-scaling-runner-set.fullname" . }}-kube-mode-role
{{- end }}
{{- define "auto-scaling-runner-set.kubeModeServiceAccountName" -}}
{{- include "auto-scaling-runner-set.fullname" . }}-kube-mode-service-account
{{- end }}
{{- define "auto-scaling-runner-set.dind-init-container" -}}
{{- range $i, $val := .Values.template.spec.containers -}}
{{- if eq $val.name "runner" -}}
image: {{ $val.image }}
{{- if $val.imagePullSecrets }}
imagePullSecrets:
{{ $val.imagePullSecrets | toYaml -}}
{{- end }}
command: ["cp"]
args: ["-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"]
volumeMounts:
- name: dind-externals
mountPath: /actions-runner/tmpDir
{{- end }}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.dind-container" -}}
image: docker:dind
securityContext:
privileged: true
volumeMounts:
- name: work
mountPath: /actions-runner/_work
- name: dind-cert
mountPath: /certs/client
- name: dind-externals
mountPath: /actions-runner/externals
{{- end }}
{{- define "auto-scaling-runner-set.dind-volume" -}}
- name: dind-cert
emptyDir: {}
- name: dind-externals
emptyDir: {}
{{- end }}
{{- define "auto-scaling-runner-set.dind-work-volume" -}}
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 -}}
- name: work
{{- range $key, $val := $volume }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if eq $createWorkVolume 1 }}
- name: work
emptyDir: {}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.kubernetes-mode-work-volume" -}}
{{- $createWorkVolume := 1 }}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if eq $volume.name "work" }}
{{- $createWorkVolume = 0 -}}
- name: work
{{- range $key, $val := $volume }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if eq $createWorkVolume 1 }}
- name: work
ephemeral:
volumeClaimTemplate:
spec:
{{- .Values.containerMode.kubernetesModeWorkVolumeClaim | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.non-work-volumes" -}}
{{- range $i, $volume := .Values.template.spec.volumes }}
{{- if ne $volume.name "work" }}
- name: {{ $volume.name }}
{{- range $key, $val := $volume }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.non-runner-containers" -}}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if ne $container.name "runner" -}}
- name: {{ $container.name }}
{{- range $key, $val := $container }}
{{- if ne $key "name" }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.dind-runner-container" -}}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- $setDockerHost := 1 }}
{{- $setDockerTlsVerify := 1 }}
{{- $setDockerCertPath := 1 }}
env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "DOCKER_HOST" }}
{{- $setDockerHost = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_TLS_VERIFY" }}
{{- $setDockerTlsVerify = 0 -}}
{{- end }}
{{- if eq $env.name "DOCKER_CERT_PATH" }}
{{- $setDockerCertPath = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
{{- if $setDockerHost }}
- name: DOCKER_HOST
value: tcp://localhost:2376
{{- end }}
{{- if $setDockerTlsVerify }}
- name: DOCKER_TLS_VERIFY
value: "1"
{{- end }}
{{- if $setDockerCertPath }}
- name: DOCKER_CERT_PATH
value: /certs/client
{{- end }}
{{- end }}
{{- $mountWork := 1 }}
{{- $mountDindCert := 1 }}
volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 -}}
{{- end }}
{{- if eq $volMount.name "dind-cert" }}
{{- $mountDindCert = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if $mountWork }}
- name: work
mountPath: /actions-runner/_work
{{- end }}
{{- if $mountDindCert }}
- name: dind-cert
mountPath: /certs/client
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "auto-scaling-runner-set.kubernetes-mode-runner-container" -}}
{{- range $i, $container := .Values.template.spec.containers -}}
{{- if eq $container.name "runner" -}}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{ $key }}: {{ $val }}
{{- end }}
{{- end }}
{{- $setContainerHooks := 1 }}
{{- $setPodName := 1 }}
{{- $setRequireJobContainer := 1 }}
env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
{{- $setContainerHooks = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
{{- $setPodName = 0 -}}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
{{- $setRequireJobContainer = 0 -}}
{{- end }}
- name: {{ $env.name }}
{{- range $envKey, $envVal := $env }}
{{- if ne $envKey "name" }}
{{ $envKey }}: {{ $envVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if $setContainerHooks }}
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
value: /actions-runner/k8s/index.js
{{- end }}
{{- if $setPodName }}
- name: ACTIONS_RUNNER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
{{- end }}
{{- if $setRequireJobContainer }}
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
value: "true"
{{- end }}
{{- $mountWork := 1 }}
volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "work" }}
{{- $mountWork = 0 -}}
{{- end }}
- name: {{ $volMount.name }}
{{- range $mountKey, $mountVal := $volMount }}
{{- if ne $mountKey "name" }}
{{ $mountKey }}: {{ $mountVal | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if $mountWork }}
- name: work
mountPath: /actions-runner/_work
{{- end }}
{{- end }}
{{- end }}
{{- end }}
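For example, with values along these lines (the runner image is illustrative), the dind helpers above inject an init container that copies /actions-runner/externals into the dind-externals volume, a privileged docker:dind sidecar, the dind-cert and work volumes, and DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH environment variables on the runner container unless those are already set:

containerMode:
  type: "dind"
template:
  spec:
    containers:
      - name: runner
        image: ghcr.io/actions/actions-runner:latest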


@@ -0,0 +1,91 @@
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingRunnerSet
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "auto-scaling-runner-set.labels" . | nindent 4 }}
spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" .Values.githubConfigUrl }}
githubConfigSecret: {{ include "auto-scaling-runner-set.githubsecret" . }}
{{- with .Values.runnerGroup }}
runnerGroup: {{ . }}
{{- end }}
{{- if and (kindIs "int64" .Values.minRunners) (kindIs "int64" .Values.maxRunners) }}
{{- if gt .Values.minRunners .Values.maxRunners }}
{{- fail "maxRunners has to be greater or equal to minRunners" }}
{{- end }}
{{- end }}
{{- if kindIs "int64" .Values.maxRunners }}
{{- if lt .Values.maxRunners 0 }}
{{- fail "maxRunners has to be greater or equal to 0" }}
{{- end }}
maxRunners: {{ .Values.maxRunners | int }}
{{- end }}
{{- if kindIs "int64" .Values.minRunners }}
{{- if lt .Values.minRunners 0 }}
{{- fail "minRunners has to be greater or equal to 0" }}
{{- end }}
minRunners: {{ .Values.minRunners | int }}
{{- end }}
template:
{{- with .Values.template.metadata }}
metadata:
{{- with .labels }}
labels:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .annotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- range $key, $val := .Values.template.spec }}
{{- if and (ne $key "containers") (ne $key "volumes") (ne $key "initContainers") (ne $key "serviceAccountName") }}
{{ $key }}: {{ $val | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- if eq .Values.containerMode.type "kubernetes" }}
serviceAccountName: {{ default (include "auto-scaling-runner-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- else }}
serviceAccountName: {{ default (include "auto-scaling-runner-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- end }}
{{- if or .Values.template.spec.initContainers (eq .Values.containerMode.type "dind") }}
initContainers:
{{- if eq .Values.containerMode.type "dind" }}
- name: init-dind-externals
{{- include "auto-scaling-runner-set.dind-init-container" . | nindent 8 }}
{{- end }}
{{- with .Values.template.spec.initContainers }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
containers:
{{- if eq .Values.containerMode.type "dind" }}
- name: runner
{{- include "auto-scaling-runner-set.dind-runner-container" . | nindent 8 }}
- name: dind
{{- include "auto-scaling-runner-set.dind-container" . | nindent 8 }}
{{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
- name: runner
{{- include "auto-scaling-runner-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "auto-scaling-runner-set.non-runner-containers" . | nindent 6 }}
{{- else }}
{{ .Values.template.spec.containers | toYaml | nindent 6 }}
{{- end }}
{{- if or .Values.template.spec.volumes (eq .Values.containerMode.type "dind") (eq .Values.containerMode.type "kubernetes") }}
volumes:
{{- if eq .Values.containerMode.type "dind" }}
{{- include "auto-scaling-runner-set.dind-volume" . | nindent 6 }}
{{- include "auto-scaling-runner-set.dind-work-volume" . | nindent 6 }}
{{- else if eq .Values.containerMode.type "kubernetes" }}
{{- include "auto-scaling-runner-set.kubernetes-mode-work-volume" . | nindent 6 }}
{{- end }}
{{- include "auto-scaling-runner-set.non-work-volumes" . | nindent 6 }}
{{- end }}
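A minimal values file for this chart therefore only needs the GitHub URL and a config secret, much like the CI test values earlier in this commit; the fragment below is a sketch with an illustrative repository and placeholder token. runnerGroup, minRunners/maxRunners, containerMode and a full pod template can be layered on top, and the guards above reject negative counts or a minRunners larger than maxRunners:

githubConfigUrl: "https://github.com/myorg/myrepo"
githubConfigSecret:
  github_token: "<personal access token>"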


@@ -0,0 +1,37 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "auto-scaling-runner-set.githubsecret" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "auto-scaling-runner-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/secret-protection
data:
{{- $hasToken := false }}
{{- $hasAppId := false }}
{{- $hasInstallationId := false }}
{{- $hasPrivateKey := false }}
{{- range $secretName, $secretValue := (required "Values.githubConfigSecret is required for setting auth with GitHub server." .Values.githubConfigSecret) }}
{{- if $secretValue }}
{{ $secretName }}: {{ $secretValue | toString | b64enc }}
{{- if eq $secretName "github_token" }}
{{- $hasToken = true }}
{{- end }}
{{- if eq $secretName "github_app_id" }}
{{- $hasAppId = true }}
{{- end }}
{{- if eq $secretName "github_app_installation_id" }}
{{- $hasInstallationId = true }}
{{- end }}
{{- if eq $secretName "github_app_private_key" }}
{{- $hasPrivateKey = true }}
{{- end }}
{{- end }}
{{- end }}
{{- if and (not $hasToken) (not ($hasAppId)) }}
{{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_token or .Values.githubConfigSecret.github_app_id." }}
{{- end }}
{{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
{{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
{{- end }}
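The validation above accepts two shapes of githubConfigSecret, sketched here with placeholder values: a GitHub token, or a GitHub App, in which case the installation ID and private key must accompany the App ID:

githubConfigSecret:
  github_token: "<PAT>"

or

githubConfigSecret:
  github_app_id: "10"
  github_app_installation_id: "100"
  github_app_private_key: |
    -----BEGIN RSA PRIVATE KEY-----
    ...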


@@ -0,0 +1,24 @@
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch",]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "delete"]
{{- end }}
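This Role (together with the RoleBinding and ServiceAccount templates that follow) is rendered only when the chart runs in kubernetes container mode and no serviceAccountName is supplied in the pod template, i.e. for values roughly like the sketch below (storage class and size are illustrative):

containerMode:
  type: "kubernetes"
  kubernetesModeWorkVolumeClaim:
    accessModes: ["ReadWriteOnce"]
    storageClassName: "standard"
    resources:
      requests:
        storage: 1Gi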


@@ -0,0 +1,15 @@
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "auto-scaling-runner-set.kubeModeRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -0,0 +1,9 @@
{{- if and (eq .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "auto-scaling-runner-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "auto-scaling-runner-set.labels" . | nindent 4 }}
{{- end }}


@@ -0,0 +1,9 @@
{{- if and (ne .Values.containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "auto-scaling-runner-set.noPermissionServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "auto-scaling-runner-set.labels" . | nindent 4 }}
{{- end }}
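
Both generated service accounts are skipped when the runner pod template already names one. A values sketch for bringing your own service account ("my-runner-sa" is hypothetical and assumed to exist in the release namespace):

template:
  spec:
    serviceAccountName: "my-runner-sa"
    containers:
      - name: runner
        image: ghcr.io/actions/actions-runner:latest
        command: ["/actions-runner/run.sh"]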

View File

@ -0,0 +1,606 @@
package tests
import (
"path/filepath"
"strings"
"testing"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
var githubSecret corev1.Secret
helm.UnmarshalK8SYaml(t, output, &githubSecret)
assert.Equal(t, namespaceName, githubSecret.Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", githubSecret.Name)
assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
}
func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
var githubSecret corev1.Secret
helm.UnmarshalK8SYaml(t, output, &githubSecret)
assert.Equal(t, namespaceName, githubSecret.Namespace)
assert.Equal(t, "10", string(githubSecret.Data["github_app_id"]))
assert.Equal(t, "100", string(githubSecret.Data["github_app_installation_id"]))
assert.Equal(t, "private_key", string(githubSecret.Data["github_app_private_key"]))
}
func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "",
"githubConfigSecret.github_token": "",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
require.Error(t, err)
assert.ErrorContains(t, err, "provide .Values.githubConfigSecret.github_token or .Values.githubConfigSecret.github_app_id")
}
func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/githubsecret.yaml"})
require.Error(t, err)
assert.ErrorContains(t, err, "provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key")
}
func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-no-permission-service-account", serviceAccount.Name)
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-runners-auto-scaling-runner-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
}
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", serviceAccount.Name)
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)
assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", role.Name)
assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
assert.Equal(t, "pods/log", role.Rules[2].Resources[0])
assert.Equal(t, "jobs", role.Rules[3].Resources[0])
assert.Equal(t, "secrets", role.Rules[4].Resources[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
var roleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-role", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-runners-auto-scaling-runner-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
}
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"template.spec.serviceAccountName": "test-service-account",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/no_permission_serviceaccount.yaml"})
assert.ErrorContains(t, err, "could not find template templates/no_permission_serviceaccount.yaml in chart", "no permission service account should not be rendered")
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
}
func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
}
func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"template.metadata.labels.test1": "test1",
"template.metadata.labels.test2": "test2",
"template.metadata.annotations.test3": "test3",
"template.metadata.annotations.test4": "test4",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.NotNil(t, ars.Spec.Template.Labels, "Template.Spec.Labels should not be nil")
assert.Equal(t, "test1", ars.Spec.Template.Labels["test1"], "Template.Spec.Labels should have test1")
assert.Equal(t, "test2", ars.Spec.Template.Labels["test2"], "Template.Spec.Labels should have test2")
assert.NotNil(t, ars.Spec.Template.Annotations, "Template.Spec.Annotations should not be nil")
assert.Equal(t, "test3", ars.Spec.Template.Annotations["test3"], "Template.Spec.Annotations should have test3")
assert.Equal(t, "test4", ars.Spec.Template.Annotations["test4"], "Template.Spec.Annotations should have test4")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
}
func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "-1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
require.Error(t, err)
assert.ErrorContains(t, err, "maxRunners has to be greater or equal to 0")
}
func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "1",
"minRunners": "-1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
require.Error(t, err)
assert.ErrorContains(t, err, "minRunners has to be greater or equal to 0")
}
func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "1",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
require.Error(t, err)
assert.ErrorContains(t, err, "maxRunners has to be greater or equal to minRunners")
}
func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "0",
"minRunners": "0",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, 0, *ars.Spec.MinRunners, "MinRunners should be 0")
assert.Equal(t, 0, *ars.Spec.MaxRunners, "MaxRunners should be 0")
}
func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"minRunners": "5",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, 5, *ars.Spec.MinRunners, "MinRunners should be 5")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
}
func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"maxRunners": "5",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, 5, *ars.Spec.MaxRunners, "MaxRunners should be 5")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
}
func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "dind",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container")
assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
assert.Equal(t, "-r -v /actions-runner/externals/. /actions-runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
}
func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../auto-scaling-runner-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "auto-scaling-runner-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-auto-scaling-runner-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "/actions-runner/k8s/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Len(t, ars.Spec.Template.Spec.Volumes, 1, "Template.Spec should have 1 volume")
assert.Equal(t, "work", ars.Spec.Template.Spec.Volumes[0].Name)
assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume")
}

View File

@ -0,0 +1,117 @@
## githubConfigUrl is the GitHub URL where you want to configure runners
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
githubConfigUrl: ""
## githubConfigSecret is the Kubernetes secret data used to authenticate with the GitHub API.
## You can authenticate with either a GitHub App or a personal access token (PAT).
githubConfigSecret:
### GitHub Apps Configuration
## NOTE: IDs MUST be strings, use quotes
#github_app_id: ""
#github_app_installation_id: ""
#github_app_private_key: |
### GitHub PAT Configuration
github_token: ""
## maxRunners is the max number of runners the auto scaling runner set will scale up to.
# maxRunners: 5
## minRunners is the min number of runners the auto scaling runner set will scale down to.
# minRunners: 0
# runnerGroup: "default"
## template is the PodSpec for each runner Pod
template:
spec:
containers:
- name: runner
image: ghcr.io/actions/actions-runner:latest
command: ["/actions-runner/run.sh"]
containerMode:
type: "" ## type can be set to dind or kubernetes
## with containerMode.type=dind, we will populate the template.spec with the following pod spec
## template:
## spec:
## initContainers:
## - name: initExternalsInternalVolume
## image: ghcr.io/actions/actions-runner:latest
## command: ["cp", "-r", "-v", "/actions-runner/externals/.", "/actions-runner/tmpDir/"]
## volumeMounts:
## - name: externalsInternal
## mountPath: /actions-runner/tmpDir
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## env:
## - name: DOCKER_HOST
## value: tcp://localhost:2376
## - name: DOCKER_TLS_VERIFY
## value: "1"
## - name: DOCKER_CERT_PATH
## value: /certs/client
## volumeMounts:
## - name: workingDirectoryInternal
## mountPath: /actions-runner/_work
## - name: dinDInternal
## mountPath: /certs/client
## readOnly: true
## - name: dind
## image: docker:dind
## securityContext:
## privileged: true
## volumeMounts:
## - mountPath: /certs/client
## name: dinDInternal
## - mountPath: /actions-runner/_work
## name: workingDirectoryInternal
## - mountPath: /actions-runner/externals
## name: externalsInternal
## volumes:
## - name: dinDInternal
## emptyDir: {}
## - name: workingDirectoryInternal
## emptyDir: {}
## - name: externalsInternal
## emptyDir: {}
######################################################################################################
## with containerMode.type=kubernetes, we will populate the template.spec with the following pod spec
## template:
## spec:
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## env:
## - name: ACTIONS_RUNNER_CONTAINER_HOOKS
## value: /actions-runner/k8s/index.js
## - name: ACTIONS_RUNNER_POD_NAME
## valueFrom:
## fieldRef:
## fieldPath: metadata.name
## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
## value: "true"
## volumeMounts:
## - name: work
## mountPath: /actions-runner/_work
## volumes:
## - name: work
## ephemeral:
## volumeClaimTemplate:
## spec:
## accessModes: [ "ReadWriteOnce" ]
## storageClassName: "local-path"
## resources:
## requests:
## storage: 1Gi
## the following is required when containerMode.type=kubernetes
kubernetesModeWorkVolumeClaim:
accessModes: ["ReadWriteOnce"]
# For testing, use https://github.com/rancher/local-path-provisioner to provide dynamically provisioned volumes
# TODO: remove before release
storageClassName: "dynamic-blob-storage"
resources:
requests:
storage: 1Gi
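
Putting the documented keys together, a minimal override file for a Docker-in-Docker scale set could look like the sketch below (URL and token are placeholders):

githubConfigUrl: "https://github.com/myorg/myrepo"
githubConfigSecret:
  github_token: "ghp_xxxxxxxxxxxx"
minRunners: 0
maxRunners: 5
containerMode:
  type: "dind"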