Trim gha-runner-scale-set to gha-rs in names and remove role type suffixes (#2706)
This commit is contained in:
parent 78271000c0
commit 6a75bc0880
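For context: this change shortens the chart names (gha-runner-scale-set-controller becomes gha-rs-controller, gha-runner-scale-set becomes gha-rs) and drops the -role/-rolebinding type suffixes from generated RBAC resource names. A rough sketch of the effect on the objects referenced in the workflow below; the namespace and release names are the ones used in the e2e tests, and the exact names always depend on the Helm release name:

```bash
# Old names (removed lines in this diff):
#   deployment/arc-gha-runner-scale-set-controller
#   test-arc-gha-runner-scale-set-controller-manager-listener-role
# New names (added lines in this diff):
#   deployment/arc-gha-rs-controller
#   test-arc-gha-rs-controller-listener
kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
kubectl logs deployment/arc-gha-rs-controller -n arc-systems
```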
				|  | @ -119,7 +119,7 @@ runs: | |||
| 
 | ||||
|     - name: Generate summary about the triggered workflow run | ||||
|       shell: bash | ||||
|       run: |  | ||||
|       run: | | ||||
|         cat <<-EOF > $GITHUB_STEP_SUMMARY | ||||
|         | **Triggered workflow run** | | ||||
|         |:--------------------------:| | ||||
|  | @ -199,4 +199,4 @@ runs: | |||
|       shell: bash | ||||
|       if: always() | ||||
|       run: | | ||||
|         kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}} | ||||
|         kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}} | ||||
|  |  | |||
|  | @ -19,7 +19,7 @@ env: | |||
|   IMAGE_VERSION: "0.4.0" | ||||
| 
 | ||||
| concurrency: | ||||
|   # This will make sure we only apply the concurrency limits on pull requests  | ||||
|   # This will make sure we only apply the concurrency limits on pull requests | ||||
|   # but not pushes to master branch by making the concurrency group name unique | ||||
|   # for pushes | ||||
|   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} | ||||
|  | @ -58,21 +58,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -149,21 +149,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -238,21 +238,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -333,21 +333,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|  | @ -427,21 +427,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -528,21 +528,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -623,21 +623,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -744,21 +744,21 @@ jobs: | |||
|           --debug | ||||
|           count=0 | ||||
|           while true; do | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name) | ||||
|             POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name) | ||||
|             if [ -n "$POD_NAME" ]; then | ||||
|               echo "Pod found: $POD_NAME" | ||||
|               break | ||||
|             fi | ||||
|             if [ "$count" -ge 60 ]; then | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" | ||||
|               echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller" | ||||
|               exit 1 | ||||
|             fi | ||||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller | ||||
|           kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller | ||||
|           kubectl get pod -n arc-systems | ||||
|           kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems | ||||
|           kubectl describe deployment arc-gha-rs-controller -n arc-systems | ||||
| 
 | ||||
|       - name: Install gha-runner-scale-set | ||||
|         id: install_arc | ||||
|  | @ -872,11 +872,11 @@ jobs: | |||
|             sleep 1 | ||||
|             count=$((count+1)) | ||||
|           done | ||||
|        | ||||
| 
 | ||||
|       - name: Gather logs and cleanup | ||||
|         shell: bash | ||||
|         if: always() | ||||
|         run: | | ||||
|           helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug | ||||
|           kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}" | ||||
|           kubectl logs deployment/arc-gha-runner-scale-set-controller -n "arc-systems" | ||||
|           kubectl logs deployment/arc-gha-rs-controller -n "arc-systems" | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| apiVersion: v2 | ||||
| name: gha-runner-scale-set-controller | ||||
| name: gha-rs-controller | ||||
| description: A Helm chart for install actions-runner-controller CRD | ||||
| 
 | ||||
| # A chart can be either an 'application' or a 'library' chart. | ||||
|  |  | |||
|  | @ -39,7 +39,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }} | |||
| {{- if .Chart.AppVersion }} | ||||
| app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} | ||||
| {{- end }} | ||||
| app.kubernetes.io/part-of: gha-runner-scale-set-controller | ||||
| app.kubernetes.io/part-of: gha-rs-controller | ||||
| app.kubernetes.io/managed-by: {{ .Release.Service }} | ||||
| {{- range $k, $v := .Values.labels }} | ||||
| {{ $k }}: {{ $v }} | ||||
|  | @ -73,43 +73,43 @@ Create the name of the service account to use | |||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }} | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }} | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-watch-role | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-watch-rolebinding | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-listener | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-listener | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-role | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}} | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-rolebinding | ||||
| {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}} | ||||
|  |  | |||
|  | @ -23,7 +23,7 @@ spec: | |||
|         {{- toYaml . | nindent 8 }} | ||||
|       {{- end }} | ||||
|       labels: | ||||
|         app.kubernetes.io/part-of: gha-runner-scale-set-controller | ||||
|         app.kubernetes.io/part-of: gha-rs-controller | ||||
|         app.kubernetes.io/component: controller-manager | ||||
|         app.kubernetes.io/version: {{ .Chart.Version }} | ||||
|         {{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }} | ||||
|  |  | |||
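One way to sanity-check the relabelled controller deployment is to render the chart locally and grep for the part-of label. This is only a sketch: it assumes the controller chart is checked out at ./charts/gha-runner-scale-set-controller, a path not shown in this diff.

```bash
# Render the controller chart and confirm the new part-of label value.
helm template test-arc ./charts/gha-runner-scale-set-controller --namespace arc-systems \
  | grep -m1 "app.kubernetes.io/part-of"
# Expected after this change: app.kubernetes.io/part-of: gha-rs-controller
```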
|  | @ -48,7 +48,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &serviceAccount) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, serviceAccount.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", serviceAccount.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", serviceAccount.Name) | ||||
| 	assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"])) | ||||
| } | ||||
| 
 | ||||
|  | @ -175,7 +175,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerClusterRole) | ||||
| 
 | ||||
| 	assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRole.Name) | ||||
| 	assert.Equal(t, 16, len(managerClusterRole.Rules)) | ||||
| 
 | ||||
| 	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"}) | ||||
|  | @ -209,9 +209,9 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding) | ||||
| 
 | ||||
| 	assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace") | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace) | ||||
| 
 | ||||
| 	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"}) | ||||
|  | @ -243,8 +243,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerListenerRole) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace") | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRole.Name) | ||||
| 	assert.Equal(t, 4, len(managerListenerRole.Rules)) | ||||
| 	assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0]) | ||||
| 	assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0]) | ||||
|  | @ -276,9 +275,9 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace") | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerListenerRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace) | ||||
| } | ||||
| 
 | ||||
|  | @ -313,29 +312,29 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &deployment) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, deployment.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name) | ||||
| 	assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) | ||||
| 	assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) | ||||
| 	assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) | ||||
| 	assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace") | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"]) | ||||
| 
 | ||||
| 	assert.Equal(t, int32(1), *deployment.Spec.Replicas) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) | ||||
| 
 | ||||
| 	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext) | ||||
| 	assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName) | ||||
| 	assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) | ||||
|  | @ -408,11 +407,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { | |||
| 			"image.pullPolicy":             "Always", | ||||
| 			"image.tag":                    "dev", | ||||
| 			"imagePullSecrets[0].name":     "dockerhub", | ||||
| 			"nameOverride":                 "gha-runner-scale-set-controller-override", | ||||
| 			"fullnameOverride":             "gha-runner-scale-set-controller-fullname-override", | ||||
| 			"nameOverride":                 "gha-rs-controller-override", | ||||
| 			"fullnameOverride":             "gha-rs-controller-fullname-override", | ||||
| 			"env[0].name":                  "ENV_VAR_NAME_1", | ||||
| 			"env[0].value":                 "ENV_VAR_VALUE_1", | ||||
| 			"serviceAccount.name":          "gha-runner-scale-set-controller-sa", | ||||
| 			"serviceAccount.name":          "gha-rs-controller-sa", | ||||
| 			"podAnnotations.foo":           "bar", | ||||
| 			"podSecurityContext.fsGroup":   "1000", | ||||
| 			"securityContext.runAsUser":    "1000", | ||||
|  | @ -436,22 +435,22 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &deployment) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, deployment.Namespace) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-fullname-override", deployment.Name) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller-fullname-override", deployment.Name) | ||||
| 	assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-rs-controller-override", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) | ||||
| 	assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"]) | ||||
| 	assert.Equal(t, "bar", deployment.Labels["foo"]) | ||||
| 	assert.Equal(t, "actions", deployment.Labels["github"]) | ||||
| 
 | ||||
| 	assert.Equal(t, int32(1), *deployment.Spec.Replicas) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"]) | ||||
|  | @ -462,7 +461,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) { | |||
| 
 | ||||
| 	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1) | ||||
| 	assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, "gha-rs-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup) | ||||
| 	assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName) | ||||
| 	assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) | ||||
|  | @ -545,7 +544,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) { | |||
| 	var leaderRole rbacv1.Role | ||||
| 	helm.UnmarshalK8SYaml(t, output, &leaderRole) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRole.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRole.Name) | ||||
| 	assert.Equal(t, namespaceName, leaderRole.Namespace) | ||||
| } | ||||
| 
 | ||||
|  | @ -572,10 +571,10 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) { | |||
| 	var leaderRoleBinding rbacv1.RoleBinding | ||||
| 	helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-rolebinding", leaderRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.Name) | ||||
| 	assert.Equal(t, namespaceName, leaderRoleBinding.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", leaderRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", leaderRoleBinding.Subjects[0].Name) | ||||
| } | ||||
| 
 | ||||
| func TestTemplate_EnableLeaderElection(t *testing.T) { | ||||
|  | @ -603,7 +602,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &deployment) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, deployment.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, int32(2), *deployment.Spec.Replicas) | ||||
| 
 | ||||
|  | @ -618,7 +617,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) { | |||
| 	expectedArgs := []string{ | ||||
| 		"--auto-scaling-runner-set-only", | ||||
| 		"--enable-leader-election", | ||||
| 		"--leader-election-id=test-arc-gha-runner-scale-set-controller", | ||||
| 		"--leader-election-id=test-arc-gha-rs-controller", | ||||
| 		"--log-level=debug", | ||||
| 		"--log-format=text", | ||||
| 		"--update-strategy=immediate", | ||||
|  | @ -696,28 +695,28 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &deployment) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, deployment.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name) | ||||
| 	assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"]) | ||||
| 	assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"]) | ||||
| 	assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"]) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"]) | ||||
| 	assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"]) | ||||
| 
 | ||||
| 	assert.Equal(t, int32(1), *deployment.Spec.Replicas) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"]) | ||||
| 
 | ||||
| 	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext) | ||||
| 	assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName) | ||||
| 	assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds) | ||||
|  | @ -798,7 +797,7 @@ func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &deployment) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, deployment.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name) | ||||
| 
 | ||||
| 	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7) | ||||
| 	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name) | ||||
|  | @ -881,7 +880,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { | |||
| 	var managerSingleNamespaceControllerRole rbacv1.Role | ||||
| 	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRole.Name) | ||||
| 	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace) | ||||
| 	assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules)) | ||||
| 
 | ||||
|  | @ -890,7 +889,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) { | |||
| 	var managerSingleNamespaceWatchRole rbacv1.Role | ||||
| 	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-role", managerSingleNamespaceWatchRole.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRole.Name) | ||||
| 	assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace) | ||||
| 	assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules)) | ||||
| } | ||||
|  | @ -918,10 +917,10 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { | |||
| 	var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding | ||||
| 	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.Name) | ||||
| 	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace) | ||||
| 
 | ||||
| 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"}) | ||||
|  | @ -929,9 +928,9 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { | |||
| 	var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding | ||||
| 	helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-rolebinding", managerSingleNamespaceWatchRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.Name) | ||||
| 	assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-watch-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace) | ||||
| } | ||||
|  |  | |||
|  | @ -1,5 +1,5 @@ | |||
| apiVersion: v2 | ||||
| name: gha-runner-scale-set | ||||
| name: gha-rs | ||||
| description: A Helm chart for deploying an AutoScalingRunnerSet | ||||
| 
 | ||||
| # A chart can be either an 'application' or a 'library' chart. | ||||
|  |  | |||
|  | @ -32,7 +32,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }} | |||
| app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} | ||||
| {{- end }} | ||||
| app.kubernetes.io/managed-by: {{ .Release.Service }} | ||||
| app.kubernetes.io/part-of: gha-runner-scale-set | ||||
| app.kubernetes.io/part-of: gha-rs | ||||
| actions.github.com/scale-set-name: {{ .Release.Name }} | ||||
| actions.github.com/scale-set-namespace: {{ .Release.Namespace }} | ||||
| {{- end }} | ||||
|  | @ -62,11 +62,11 @@ app.kubernetes.io/instance: {{ .Release.Name }} | |||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.kubeModeRoleName" -}} | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-kube-mode | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}} | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-kube-mode | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} | ||||
|  | @ -428,11 +428,11 @@ volumeMounts: | |||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.managerRoleName" -}} | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-manager-role | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-manager | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.managerRoleBindingName" -}} | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding | ||||
| {{- include "gha-runner-scale-set.fullname" . }}-manager | ||||
| {{- end }} | ||||
| 
 | ||||
| {{- define "gha-runner-scale-set.managerServiceAccountName" -}} | ||||
|  | @ -451,7 +451,7 @@ volumeMounts: | |||
|   {{- $managerServiceAccountName := "" }} | ||||
|   {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} | ||||
|     {{- if kindIs "map" $deployment.metadata.labels }} | ||||
|       {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }} | ||||
|       {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }} | ||||
|         {{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }} | ||||
|           {{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }} | ||||
|           {{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}} | ||||
|  | @ -463,13 +463,13 @@ volumeMounts: | |||
|     {{- end }} | ||||
|   {{- end }} | ||||
|   {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }} | ||||
|     {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }} | ||||
|     {{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if gt $multiNamespacesCounter 1 }} | ||||
|     {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if eq $multiNamespacesCounter 1 }} | ||||
|     {{- with $controllerDeployment.metadata }} | ||||
|  | @ -482,11 +482,11 @@ volumeMounts: | |||
|         {{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }} | ||||
|       {{- end }} | ||||
|     {{- else }} | ||||
|       {{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|       {{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- end }} | ||||
|   {{- end }} | ||||
|   {{- if eq $managerServiceAccountName "" }} | ||||
|     {{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "No service account name found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
| {{- $managerServiceAccountName }} | ||||
| {{- end }} | ||||
|  | @ -508,7 +508,7 @@ volumeMounts: | |||
|   {{- $managerServiceAccountNamespace := "" }} | ||||
|   {{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }} | ||||
|     {{- if kindIs "map" $deployment.metadata.labels }} | ||||
|       {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }} | ||||
|       {{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }} | ||||
|         {{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }} | ||||
|           {{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }} | ||||
|           {{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}} | ||||
|  | @ -520,13 +520,13 @@ volumeMounts: | |||
|     {{- end }} | ||||
|   {{- end }} | ||||
|   {{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }} | ||||
|     {{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }} | ||||
|     {{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if gt $multiNamespacesCounter 1 }} | ||||
|     {{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
|   {{- if eq $multiNamespacesCounter 1 }} | ||||
|     {{- with $controllerDeployment.metadata }} | ||||
|  | @ -539,11 +539,11 @@ volumeMounts: | |||
|         {{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }} | ||||
|       {{- end }} | ||||
|     {{- else }} | ||||
|       {{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|       {{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- end }} | ||||
|   {{- end }} | ||||
|   {{- if eq $managerServiceAccountNamespace "" }} | ||||
|     {{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|     {{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }} | ||||
|   {{- end }} | ||||
| {{- $managerServiceAccountNamespace }} | ||||
| {{- end }} | ||||
|  |  | |||
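The helpers above discover the controller at install time by looking up Deployments labelled app.kubernetes.io/part-of=gha-rs-controller. A hedged way to reproduce that lookup by hand when the chart fails with one of the discovery errors shown above:

```bash
# List the controller deployments the gha-rs chart would discover, cluster-wide,
# including the service-account labels the helpers read.
kubectl get deployments --all-namespaces \
  -l app.kubernetes.io/part-of=gha-rs-controller --show-labels
```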
|  | @ -45,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &githubSecret) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, githubSecret.Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-github-secret", githubSecret.Name) | ||||
| 	assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"])) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0]) | ||||
| } | ||||
|  | @ -190,13 +190,13 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &serviceAccount) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, serviceAccount.Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission", serviceAccount.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-no-permission", serviceAccount.Name) | ||||
| 
 | ||||
| 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) | ||||
| 	var ars v1alpha1.AutoscalingRunnerSet | ||||
| 	helm.UnmarshalK8SYaml(t, output, &ars) | ||||
| 
 | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission", ars.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-no-permission", ars.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place | ||||
| } | ||||
| 
 | ||||
|  | @ -227,7 +227,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &serviceAccount) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, serviceAccount.Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode", serviceAccount.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0]) | ||||
| 
 | ||||
| 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"}) | ||||
|  | @ -235,7 +235,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &role) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, role.Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0]) | ||||
| 
 | ||||
|  | @ -251,11 +251,11 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &roleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, roleBinding.Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name) | ||||
| 	assert.Len(t, roleBinding.Subjects, 1) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode", roleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "Role", roleBinding.RoleRef.Kind) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0]) | ||||
| 
 | ||||
|  | @ -263,7 +263,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { | |||
| 	var ars v1alpha1.AutoscalingRunnerSet | ||||
| 	helm.UnmarshalK8SYaml(t, output, &ars) | ||||
| 
 | ||||
| 	expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode" | ||||
| 	expectedServiceAccountName := "test-runners-gha-rs-kube-mode" | ||||
| 	assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName) | ||||
| 	assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) | ||||
| } | ||||
|  | @ -330,14 +330,14 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) { | |||
| 	assert.Equal(t, namespaceName, ars.Namespace) | ||||
| 	assert.Equal(t, "test-runners", ars.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"]) | ||||
| 	assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"]) | ||||
| 	assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"]) | ||||
| 
 | ||||
| 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 
 | ||||
| 	assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") | ||||
| 
 | ||||
|  | @ -383,10 +383,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) { | |||
| 	assert.Equal(t, namespaceName, ars.Namespace) | ||||
| 	assert.Equal(t, "test-runners", ars.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 	assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName) | ||||
| 
 | ||||
| 	assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") | ||||
|  | @ -840,10 +840,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) { | |||
| 	assert.Equal(t, namespaceName, ars.Namespace) | ||||
| 	assert.Equal(t, "test-runners", ars.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 
 | ||||
| 	assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") | ||||
| 
 | ||||
|  | @ -932,10 +932,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T) | |||
| 	assert.Equal(t, namespaceName, ars.Namespace) | ||||
| 	assert.Equal(t, "test-runners", ars.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret) | ||||
| 
 | ||||
| 	assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty") | ||||
| 	assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil") | ||||
|  | @ -989,7 +989,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T) | |||
| 	assert.Equal(t, namespaceName, ars.Namespace) | ||||
| 	assert.Equal(t, "test-runners", ars.Name) | ||||
| 
 | ||||
| 	assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"]) | ||||
| 	assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"]) | ||||
| 	assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl) | ||||
| 	assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret) | ||||
|  | @ -1548,7 +1548,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerRole) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0]) | ||||
| 	assert.Equal(t, 6, len(managerRole.Rules)) | ||||
| 
 | ||||
|  | @ -1584,7 +1584,7 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerRole) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release") | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0]) | ||||
| 	assert.Equal(t, 7, len(managerRole.Rules)) | ||||
| 	assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0]) | ||||
|  | @ -1617,8 +1617,8 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) { | |||
| 	helm.UnmarshalK8SYaml(t, output, &managerRoleBinding) | ||||
| 
 | ||||
| 	assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release") | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.Name) | ||||
| 	assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.RoleRef.Name) | ||||
| 	assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0]) | ||||
| 	assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name) | ||||
| 	assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace) | ||||
|  | @ -1887,12 +1887,12 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t | |||
| 	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet) | ||||
| 
 | ||||
| 	annotationValues := map[string]string{ | ||||
| 		actionsgithubcom.AnnotationKeyGitHubSecretName:                 "test-runners-gha-runner-scale-set-github-secret", | ||||
| 		actionsgithubcom.AnnotationKeyManagerRoleName:                  "test-runners-gha-runner-scale-set-manager-role", | ||||
| 		actionsgithubcom.AnnotationKeyManagerRoleBindingName:           "test-runners-gha-runner-scale-set-manager-role-binding", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeRoleName:           "test-runners-gha-runner-scale-set-kube-mode-role", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName:    "test-runners-gha-runner-scale-set-kube-mode-role-binding", | ||||
| 		actionsgithubcom.AnnotationKeyGitHubSecretName:                 "test-runners-gha-rs-github-secret", | ||||
| 		actionsgithubcom.AnnotationKeyManagerRoleName:                  "test-runners-gha-rs-manager", | ||||
| 		actionsgithubcom.AnnotationKeyManagerRoleBindingName:           "test-runners-gha-rs-manager", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeRoleName:           "test-runners-gha-rs-kube-mode", | ||||
| 		actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName:    "test-runners-gha-rs-kube-mode", | ||||
| 	} | ||||
| 
 | ||||
| 	for annotation, value := range annotationValues { | ||||
|  |  | |||