Merge branch 'master' into feature/decouple_naming
commit 25b57c7ad5
@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.9.1"
+  IMAGE_VERSION: "0.9.2"
 
 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests

@@ -1,7 +1,9 @@
 run:
   timeout: 3m
 output:
-  format: github-actions
+  formats:
+    - format: github-actions
+      path: stdout
 linters-settings:
   errcheck:
     exclude-functions:

 Makefile | 4 ++--

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.315.0
+RUNNER_VERSION ?= 2.316.1
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG  ?= ${VERSION}
@@ -68,7 +68,7 @@ endif
 all: manager
 
 lint:
-	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
 
 GO_TEST_ARGS ?= -short
 

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.1
+version: 0.9.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.1"
+appVersion: "0.9.2"
 
 home: https://github.com/actions/actions-runner-controller
 

@@ -126,7 +126,3 @@ Create the name of the service account to use
 {{- end }}
 {{- $names | join ","}}
 {{- end }}
-
-{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
-{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
-{{- end }}

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.1
+version: 0.9.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.1"
+appVersion: "0.9.2"
 
 home: https://github.com/actions/actions-runner-controller
 

@@ -295,8 +295,23 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
 
 func (l *Listener) deleteLastMessage(ctx context.Context) error {
 	l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
-	if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil {
-		return fmt.Errorf("failed to delete message: %w", err)
+	err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
+	if err == nil { // if NO error
+		return nil
+	}
+
+	expiredError := &actions.MessageQueueTokenExpiredError{}
+	if !errors.As(err, &expiredError) {
+		return fmt.Errorf("failed to delete last message: %w", err)
+	}
+
+	if err := l.refreshSession(ctx); err != nil {
+		return err
+	}
+
+	err = l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
+	if err != nil {
+		return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
 	}
 
 	return nil

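The reworked deleteLastMessage retries the delete exactly once: if the first DeleteMessage call fails with a MessageQueueTokenExpiredError, the listener refreshes its message session and tries again; any other error is returned immediately. A minimal standalone sketch of that retry-once-after-refresh shape, where deleteFn and refreshFn are hypothetical stand-ins for the client calls and tokenExpiredError stands in for actions.MessageQueueTokenExpiredError (illustrative only, not part of this change):

// retry_sketch.go — illustrative only; mirrors the pattern in deleteLastMessage.
package main

import (
	"context"
	"errors"
	"fmt"
)

// tokenExpiredError stands in for actions.MessageQueueTokenExpiredError.
type tokenExpiredError struct{}

func (*tokenExpiredError) Error() string { return "message queue token expired" }

// deleteWithRefresh deletes once, and only retries after refreshing the
// session when the failure was an expired queue token.
func deleteWithRefresh(ctx context.Context, deleteFn, refreshFn func(context.Context) error) error {
	err := deleteFn(ctx)
	if err == nil {
		return nil
	}
	expired := &tokenExpiredError{}
	if !errors.As(err, &expired) {
		return fmt.Errorf("failed to delete last message: %w", err)
	}
	if err := refreshFn(ctx); err != nil {
		return err
	}
	if err := deleteFn(ctx); err != nil {
		return fmt.Errorf("failed to delete last message after message session refresh: %w", err)
	}
	return nil
}

func main() {
	calls := 0
	deleteFn := func(context.Context) error {
		calls++
		if calls == 1 {
			return &tokenExpiredError{} // first attempt: token expired
		}
		return nil // second attempt succeeds
	}
	refreshFn := func(context.Context) error { return nil }
	fmt.Println(deleteWithRefresh(context.Background(), deleteFn, refreshFn)) // <nil>
}

The RefreshAndSucceeds and RefreshAndFails cases added to the listener tests below cover both branches of this retry.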
|  | @ -377,6 +377,93 @@ func TestListener_deleteLastMessage(t *testing.T) { | ||||||
| 		err = l.deleteLastMessage(ctx) | 		err = l.deleteLastMessage(ctx) | ||||||
| 		assert.NotNil(t, err) | 		assert.NotNil(t, err) | ||||||
| 	}) | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("RefreshAndSucceeds", func(t *testing.T) { | ||||||
|  | 		t.Parallel() | ||||||
|  | 
 | ||||||
|  | 		ctx := context.Background() | ||||||
|  | 		config := Config{ | ||||||
|  | 			ScaleSetID: 1, | ||||||
|  | 			Metrics:    metrics.Discard, | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		client := listenermocks.NewClient(t) | ||||||
|  | 
 | ||||||
|  | 		newUUID := uuid.New() | ||||||
|  | 		session := &actions.RunnerScaleSetSession{ | ||||||
|  | 			SessionId:               &newUUID, | ||||||
|  | 			OwnerName:               "example", | ||||||
|  | 			RunnerScaleSet:          &actions.RunnerScaleSet{}, | ||||||
|  | 			MessageQueueUrl:         "https://example.com", | ||||||
|  | 			MessageQueueAccessToken: "1234567890", | ||||||
|  | 			Statistics:              nil, | ||||||
|  | 		} | ||||||
|  | 		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() | ||||||
|  | 
 | ||||||
|  | 		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once() | ||||||
|  | 
 | ||||||
|  | 		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool { | ||||||
|  | 			return lastMessageID.(int64) == int64(5) | ||||||
|  | 		})).Return(nil).Once() | ||||||
|  | 		config.Client = client | ||||||
|  | 
 | ||||||
|  | 		l, err := New(config) | ||||||
|  | 		require.Nil(t, err) | ||||||
|  | 
 | ||||||
|  | 		oldUUID := uuid.New() | ||||||
|  | 		l.session = &actions.RunnerScaleSetSession{ | ||||||
|  | 			SessionId:      &oldUUID, | ||||||
|  | 			RunnerScaleSet: &actions.RunnerScaleSet{}, | ||||||
|  | 		} | ||||||
|  | 		l.lastMessageID = 5 | ||||||
|  | 
 | ||||||
|  | 		config.Client = client | ||||||
|  | 
 | ||||||
|  | 		err = l.deleteLastMessage(ctx) | ||||||
|  | 		assert.NoError(t, err) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("RefreshAndFails", func(t *testing.T) { | ||||||
|  | 		t.Parallel() | ||||||
|  | 
 | ||||||
|  | 		ctx := context.Background() | ||||||
|  | 		config := Config{ | ||||||
|  | 			ScaleSetID: 1, | ||||||
|  | 			Metrics:    metrics.Discard, | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		client := listenermocks.NewClient(t) | ||||||
|  | 
 | ||||||
|  | 		newUUID := uuid.New() | ||||||
|  | 		session := &actions.RunnerScaleSetSession{ | ||||||
|  | 			SessionId:               &newUUID, | ||||||
|  | 			OwnerName:               "example", | ||||||
|  | 			RunnerScaleSet:          &actions.RunnerScaleSet{}, | ||||||
|  | 			MessageQueueUrl:         "https://example.com", | ||||||
|  | 			MessageQueueAccessToken: "1234567890", | ||||||
|  | 			Statistics:              nil, | ||||||
|  | 		} | ||||||
|  | 		client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() | ||||||
|  | 
 | ||||||
|  | 		client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice() | ||||||
|  | 
 | ||||||
|  | 		config.Client = client | ||||||
|  | 
 | ||||||
|  | 		l, err := New(config) | ||||||
|  | 		require.Nil(t, err) | ||||||
|  | 
 | ||||||
|  | 		oldUUID := uuid.New() | ||||||
|  | 		l.session = &actions.RunnerScaleSetSession{ | ||||||
|  | 			SessionId:      &oldUUID, | ||||||
|  | 			RunnerScaleSet: &actions.RunnerScaleSet{}, | ||||||
|  | 		} | ||||||
|  | 		l.lastMessageID = 5 | ||||||
|  | 
 | ||||||
|  | 		config.Client = client | ||||||
|  | 
 | ||||||
|  | 		err = l.deleteLastMessage(ctx) | ||||||
|  | 		assert.Error(t, err) | ||||||
|  | 	}) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestListener_Listen(t *testing.T) { | func TestListener_Listen(t *testing.T) { | ||||||
|  |  | ||||||
@@ -41,7 +41,7 @@ type Worker struct {
 	clientset *kubernetes.Clientset
 	config    Config
 	lastPatch int
-	lastPatchID int
+	patchSeq  int
 	logger    *logr.Logger
 }
 
@@ -51,7 +51,7 @@ func New(config Config, options ...Option) (*Worker, error) {
 	w := &Worker{
 		config:    config,
 		lastPatch: -1,
-		lastPatchID: -1,
+		patchSeq:  -1,
 	}
 
 	conf, err := rest.InClusterConfig()
@@ -163,27 +163,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
 // The function then scales the ephemeral runner set by applying the merge patch.
 // Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
 // If any error occurs during the process, it returns an error with a descriptive message.
-func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
-	// Max runners should always be set by the resource builder either to the configured value,
-	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
-	targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
-
-	logValues := []any{
-		"assigned job", count,
-		"decision", targetRunnerCount,
-		"min", w.config.MinRunners,
-		"max", w.config.MaxRunners,
-		"currentRunnerCount", w.lastPatch,
-		"jobsCompleted", jobsCompleted,
-	}
-
-	if count == 0 && jobsCompleted == 0 {
-		w.lastPatchID = 0
-	} else {
-		w.lastPatchID++
-	}
-
-	w.lastPatch = targetRunnerCount
+func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
+	patchID := w.setDesiredWorkerState(count, jobsCompleted)
 
 	original, err := json.Marshal(
 		&v1alpha1.EphemeralRunnerSet{
@@ -200,8 +181,8 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
 	patch, err := json.Marshal(
 		&v1alpha1.EphemeralRunnerSet{
 			Spec: v1alpha1.EphemeralRunnerSetSpec{
-				Replicas: targetRunnerCount,
-				PatchID:  w.lastPatchID,
+				Replicas: w.lastPatch,
+				PatchID:  patchID,
 			},
 		},
 	)
@@ -210,14 +191,13 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
 		return 0, err
 	}
 
+	w.logger.Info("Compare", "original", string(original), "patch", string(patch))
 	mergePatch, err := jsonpatch.CreateMergePatch(original, patch)
 	if err != nil {
 		return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err)
 	}
 
-	w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch))
-
-	w.logger.Info("Scaling ephemeral runner set", logValues...)
+	w.logger.Info("Preparing EphemeralRunnerSet update", "json", string(mergePatch))
 
 	patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{}
 	err = w.clientset.RESTClient().
@@ -238,5 +218,40 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo
 		"name", w.config.EphemeralRunnerSetName,
 		"replicas", patchedEphemeralRunnerSet.Spec.Replicas,
 	)
-	return targetRunnerCount, nil
+	return w.lastPatch, nil
+}
+
+// setDesiredWorkerState calculates the desired state of the worker based on the desired count and the number of jobs completed.
+func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int {
+	// Max runners should always be set by the resource builder either to the configured value,
+	// or the maximum int32 (resourcebuilder.newAutoScalingListener()).
+	targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners)
+	w.patchSeq++
+	desiredPatchID := w.patchSeq
+
+	if count == 0 && jobsCompleted == 0 { // empty batch
+		targetRunnerCount = max(w.lastPatch, targetRunnerCount)
+		if targetRunnerCount == w.config.MinRunners {
+			// We have an empty batch, and the last patch was the min runners.
+			// Since this is an empty batch and we are at the min runners, they should all be idle.
+			// If the controller accidentally created a few more pods (during scale-down events),
+			// this allows it to scale back down to the min runners.
+			// However, it is important to keep the patch sequence increasing so we don't ignore one batch.
+			desiredPatchID = 0
+		}
+	}
+
+	w.lastPatch = targetRunnerCount
+
+	w.logger.Info(
+		"Calculated target runner count",
+		"assigned job", count,
+		"decision", targetRunnerCount,
+		"min", w.config.MinRunners,
+		"max", w.config.MaxRunners,
+		"currentRunnerCount", w.lastPatch,
+		"jobsCompleted", jobsCompleted,
+	)
+
+	return desiredPatchID
 }

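setDesiredWorkerState is the piece that now decides both the replica count and the patch ID: the target is clamped to min(MinRunners+count, MaxRunners), the patch sequence grows by one on every batch, and an empty batch that lands exactly on MinRunners is reported with patch ID 0 so the controller is allowed to settle back down to the minimum. A small self-contained sketch of that decision, where the scaler type and its fields are illustrative stand-ins for the Worker rather than project code:

// scale_sketch.go — illustrative stand-in for Worker.setDesiredWorkerState
// (requires Go 1.21+ for the min/max builtins).
package main

import "fmt"

type scaler struct {
	minRunners, maxRunners int
	lastPatch, patchSeq    int
}

// next returns the replica target clamped to [minRunners, maxRunners] and a
// patch ID taken from a growing sequence; an empty batch that sits exactly at
// minRunners reports patch ID 0 so the controller may settle at the minimum.
func (s *scaler) next(count, jobsCompleted int) (replicas, patchID int) {
	target := min(s.minRunners+count, s.maxRunners)
	s.patchSeq++
	patchID = s.patchSeq
	if count == 0 && jobsCompleted == 0 { // empty batch
		target = max(s.lastPatch, target)
		if target == s.minRunners {
			patchID = 0
		}
	}
	s.lastPatch = target
	return target, patchID
}

func main() {
	s := &scaler{minRunners: 1, maxRunners: 3, lastPatch: -1, patchSeq: -1}
	fmt.Println(s.next(2, 0)) // 3 0 — two jobs assigned, target capped by max
	fmt.Println(s.next(0, 3)) // 1 1 — jobs finished, back to min runners
	fmt.Println(s.next(0, 0)) // 1 0 — empty batch at min forces patch ID 0
}

The TestSetDesiredWorkerState_* cases added below drive the real Worker through the same transitions.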
|  | @ -0,0 +1,326 @@ | ||||||
|  | package worker | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"math" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/go-logr/logr" | ||||||
|  | 	"github.com/stretchr/testify/assert" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) { | ||||||
|  | 	logger := logr.Discard() | ||||||
|  | 	newEmptyWorker := func() *Worker { | ||||||
|  | 		return &Worker{ | ||||||
|  | 			config: Config{ | ||||||
|  | 				MinRunners: 0, | ||||||
|  | 				MaxRunners: math.MaxInt32, | ||||||
|  | 			}, | ||||||
|  | 			lastPatch: -1, | ||||||
|  | 			patchSeq:  -1, | ||||||
|  | 			logger:    &logger, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	t.Run("init calculate with acquired 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("init calculate with acquired 1", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("increment patch when job done", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("increment patch when called with same parameters", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 1) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 1) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestSetDesiredWorkerState_MinSet(t *testing.T) { | ||||||
|  | 	logger := logr.Discard() | ||||||
|  | 	newEmptyWorker := func() *Worker { | ||||||
|  | 		return &Worker{ | ||||||
|  | 			config: Config{ | ||||||
|  | 				MinRunners: 1, | ||||||
|  | 				MaxRunners: math.MaxInt32, | ||||||
|  | 			}, | ||||||
|  | 			lastPatch: -1, | ||||||
|  | 			patchSeq:  -1, | ||||||
|  | 			logger:    &logger, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("request back to 0 on job done", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(3, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 4, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 3) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		// Empty batch on min runners
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) // forcing the state
 | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 2, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestSetDesiredWorkerState_MaxSet(t *testing.T) { | ||||||
|  | 	logger := logr.Discard() | ||||||
|  | 	newEmptyWorker := func() *Worker { | ||||||
|  | 		return &Worker{ | ||||||
|  | 			config: Config{ | ||||||
|  | 				MinRunners: 0, | ||||||
|  | 				MaxRunners: 5, | ||||||
|  | 			}, | ||||||
|  | 			lastPatch: -1, | ||||||
|  | 			patchSeq:  -1, | ||||||
|  | 			logger:    &logger, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 2, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("request back to 0 on job done", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale up to max when count > max", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(6, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 5, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale to max when count == max", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		w.setDesiredWorkerState(5, 0) | ||||||
|  | 		assert.Equal(t, 5, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale to max when count > max and completed > 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(1, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(6, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 5, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale back to 0 when count was > max", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(6, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(3, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 3) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		// Empty batch on min runners
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) // forcing the state
 | ||||||
|  | 		assert.Equal(t, 0, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 2, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) { | ||||||
|  | 	logger := logr.Discard() | ||||||
|  | 	newEmptyWorker := func() *Worker { | ||||||
|  | 		return &Worker{ | ||||||
|  | 			config: Config{ | ||||||
|  | 				MinRunners: 1, | ||||||
|  | 				MaxRunners: 3, | ||||||
|  | 			}, | ||||||
|  | 			lastPatch: -1, | ||||||
|  | 			patchSeq:  -1, | ||||||
|  | 			logger:    &logger, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale to min when count == 0", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(2, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 1) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale up to max when count > max", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(4, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("scale to max when count == max", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(3, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | 
 | ||||||
|  | 	t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) { | ||||||
|  | 		w := newEmptyWorker() | ||||||
|  | 		patchID := w.setDesiredWorkerState(3, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) | ||||||
|  | 		assert.Equal(t, 3, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 0, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 3) | ||||||
|  | 		assert.Equal(t, 1, patchID) | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 1, w.patchSeq) | ||||||
|  | 
 | ||||||
|  | 		// Empty batch on min runners
 | ||||||
|  | 		patchID = w.setDesiredWorkerState(0, 0) | ||||||
|  | 		assert.Equal(t, 0, patchID) // forcing the state
 | ||||||
|  | 		assert.Equal(t, 1, w.lastPatch) | ||||||
|  | 		assert.Equal(t, 2, w.patchSeq) | ||||||
|  | 	}) | ||||||
|  | } | ||||||
@@ -690,30 +690,6 @@ func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListen
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	groupVersionIndexer := func(rawObj client.Object) []string {
-		groupVersion := v1alpha1.GroupVersion.String()
-		owner := metav1.GetControllerOf(rawObj)
-		if owner == nil {
-			return nil
-		}
-
-		// ...make sure it is owned by this controller
-		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
-			return nil
-		}
-
-		// ...and if so, return it
-		return []string{owner.Name}
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
 	labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request {
 		var requests []reconcile.Request
 		labels := obj.GetLabels()

|  | @ -21,7 +21,7 @@ import ( | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	"k8s.io/apimachinery/pkg/types" | 	"k8s.io/apimachinery/pkg/types" | ||||||
| 
 | 
 | ||||||
| 	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" | 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| const ( | const ( | ||||||
|  | @ -34,9 +34,9 @@ var _ = Describe("Test AutoScalingListener controller", func() { | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet | 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var autoscalingListener *actionsv1alpha1.AutoscalingListener | 	var autoscalingListener *v1alpha1.AutoscalingListener | ||||||
| 
 | 
 | ||||||
| 	BeforeEach(func() { | 	BeforeEach(func() { | ||||||
| 		ctx = context.Background() | 		ctx = context.Background() | ||||||
|  | @ -53,12 +53,12 @@ var _ = Describe("Test AutoScalingListener controller", func() { | ||||||
| 
 | 
 | ||||||
| 		min := 1 | 		min := 1 | ||||||
| 		max := 10 | 		max := 10 | ||||||
| 		autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ | 		autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ | 			Spec: v1alpha1.AutoscalingRunnerSetSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				MaxRunners:         &max, | 				MaxRunners:         &max, | ||||||
|  | @ -79,12 +79,12 @@ var _ = Describe("Test AutoScalingListener controller", func() { | ||||||
| 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		autoscalingListener = &actionsv1alpha1.AutoscalingListener{ | 		autoscalingListener = &v1alpha1.AutoscalingListener{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asl", | 				Name:      "test-asl", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingListenerSpec{ | 			Spec: v1alpha1.AutoscalingListenerSpec{ | ||||||
| 				GitHubConfigUrl:               "https://github.com/owner/repo", | 				GitHubConfigUrl:               "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret:            configSecret.Name, | 				GitHubConfigSecret:            configSecret.Name, | ||||||
| 				RunnerScaleSetId:              1, | 				RunnerScaleSetId:              1, | ||||||
|  | @ -119,7 +119,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { | ||||||
| 			).Should(Succeed(), "Config secret should be created") | 			).Should(Succeed(), "Config secret should be created") | ||||||
| 
 | 
 | ||||||
| 			// Check if finalizer is added
 | 			// Check if finalizer is added
 | ||||||
| 			created := new(actionsv1alpha1.AutoscalingListener) | 			created := new(v1alpha1.AutoscalingListener) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (string, error) { | 				func() (string, error) { | ||||||
| 					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) | 					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) | ||||||
|  | @ -298,7 +298,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { | ||||||
| 			// The AutoScalingListener should be deleted
 | 			// The AutoScalingListener should be deleted
 | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					listenerList := new(actionsv1alpha1.AutoscalingListenerList) | 					listenerList := new(v1alpha1.AutoscalingListenerList) | ||||||
| 					err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name}) | 					err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": autoscalingListener.Name}) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return err | 						return err | ||||||
|  | @ -415,9 +415,9 @@ var _ = Describe("Test AutoScalingListener customization", func() { | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet | 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var autoscalingListener *actionsv1alpha1.AutoscalingListener | 	var autoscalingListener *v1alpha1.AutoscalingListener | ||||||
| 
 | 
 | ||||||
| 	var runAsUser int64 = 1001 | 	var runAsUser int64 = 1001 | ||||||
| 
 | 
 | ||||||
|  | @ -458,12 +458,12 @@ var _ = Describe("Test AutoScalingListener customization", func() { | ||||||
| 
 | 
 | ||||||
| 		min := 1 | 		min := 1 | ||||||
| 		max := 10 | 		max := 10 | ||||||
| 		autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ | 		autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ | 			Spec: v1alpha1.AutoscalingRunnerSetSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				MaxRunners:         &max, | 				MaxRunners:         &max, | ||||||
|  | @ -484,12 +484,12 @@ var _ = Describe("Test AutoScalingListener customization", func() { | ||||||
| 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		autoscalingListener = &actionsv1alpha1.AutoscalingListener{ | 		autoscalingListener = &v1alpha1.AutoscalingListener{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asltest", | 				Name:      "test-asltest", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingListenerSpec{ | 			Spec: v1alpha1.AutoscalingListenerSpec{ | ||||||
| 				GitHubConfigUrl:               "https://github.com/owner/repo", | 				GitHubConfigUrl:               "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret:            configSecret.Name, | 				GitHubConfigSecret:            configSecret.Name, | ||||||
| 				RunnerScaleSetId:              1, | 				RunnerScaleSetId:              1, | ||||||
|  | @ -512,7 +512,7 @@ var _ = Describe("Test AutoScalingListener customization", func() { | ||||||
| 	Context("When creating a new AutoScalingListener", func() { | 	Context("When creating a new AutoScalingListener", func() { | ||||||
| 		It("It should create customized pod with applied configuration", func() { | 		It("It should create customized pod with applied configuration", func() { | ||||||
| 			// Check if finalizer is added
 | 			// Check if finalizer is added
 | ||||||
| 			created := new(actionsv1alpha1.AutoscalingListener) | 			created := new(v1alpha1.AutoscalingListener) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (string, error) { | 				func() (string, error) { | ||||||
| 					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) | 					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) | ||||||
|  | @ -570,19 +570,19 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet | 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var autoscalingListener *actionsv1alpha1.AutoscalingListener | 	var autoscalingListener *v1alpha1.AutoscalingListener | ||||||
| 
 | 
 | ||||||
| 	createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) { | 	createRunnerSetAndListener := func(proxy *v1alpha1.ProxyConfig) { | ||||||
| 		min := 1 | 		min := 1 | ||||||
| 		max := 10 | 		max := 10 | ||||||
| 		autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ | 		autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ | 			Spec: v1alpha1.AutoscalingRunnerSetSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				MaxRunners:         &max, | 				MaxRunners:         &max, | ||||||
|  | @ -604,12 +604,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { | ||||||
| 		err := k8sClient.Create(ctx, autoscalingRunnerSet) | 		err := k8sClient.Create(ctx, autoscalingRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		autoscalingListener = &actionsv1alpha1.AutoscalingListener{ | 		autoscalingListener = &v1alpha1.AutoscalingListener{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asl", | 				Name:      "test-asl", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingListenerSpec{ | 			Spec: v1alpha1.AutoscalingListenerSpec{ | ||||||
| 				GitHubConfigUrl:               "https://github.com/owner/repo", | 				GitHubConfigUrl:               "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret:            configSecret.Name, | 				GitHubConfigSecret:            configSecret.Name, | ||||||
| 				RunnerScaleSetId:              1, | 				RunnerScaleSetId:              1, | ||||||
|  | @ -658,12 +658,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { | ||||||
| 		err := k8sClient.Create(ctx, proxyCredentials) | 		err := k8sClient.Create(ctx, proxyCredentials) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret") | 		Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret") | ||||||
| 
 | 
 | ||||||
| 		proxy := &actionsv1alpha1.ProxyConfig{ | 		proxy := &v1alpha1.ProxyConfig{ | ||||||
| 			HTTP: &actionsv1alpha1.ProxyServerConfig{ | 			HTTP: &v1alpha1.ProxyServerConfig{ | ||||||
| 				Url:                 "http://localhost:8080", | 				Url:                 "http://localhost:8080", | ||||||
| 				CredentialSecretRef: "proxy-credentials", | 				CredentialSecretRef: "proxy-credentials", | ||||||
| 			}, | 			}, | ||||||
| 			HTTPS: &actionsv1alpha1.ProxyServerConfig{ | 			HTTPS: &v1alpha1.ProxyServerConfig{ | ||||||
| 				Url:                 "https://localhost:8443", | 				Url:                 "https://localhost:8443", | ||||||
| 				CredentialSecretRef: "proxy-credentials", | 				CredentialSecretRef: "proxy-credentials", | ||||||
| 			}, | 			}, | ||||||
|  | @ -766,19 +766,19 @@ var _ = Describe("Test AutoScalingListener controller with template modification | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet | 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var autoscalingListener *actionsv1alpha1.AutoscalingListener | 	var autoscalingListener *v1alpha1.AutoscalingListener | ||||||
| 
 | 
 | ||||||
| 	createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) { | 	createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) { | ||||||
| 		min := 1 | 		min := 1 | ||||||
| 		max := 10 | 		max := 10 | ||||||
| 		autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ | 		autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ | 			Spec: v1alpha1.AutoscalingRunnerSetSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				MaxRunners:         &max, | 				MaxRunners:         &max, | ||||||
|  | @ -800,12 +800,12 @@ var _ = Describe("Test AutoScalingListener controller with template modification | ||||||
| 		err := k8sClient.Create(ctx, autoscalingRunnerSet) | 		err := k8sClient.Create(ctx, autoscalingRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		autoscalingListener = &actionsv1alpha1.AutoscalingListener{ | 		autoscalingListener = &v1alpha1.AutoscalingListener{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asl", | 				Name:      "test-asl", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingListenerSpec{ | 			Spec: v1alpha1.AutoscalingListenerSpec{ | ||||||
| 				GitHubConfigUrl:               "https://github.com/owner/repo", | 				GitHubConfigUrl:               "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret:            configSecret.Name, | 				GitHubConfigSecret:            configSecret.Name, | ||||||
| 				RunnerScaleSetId:              1, | 				RunnerScaleSetId:              1, | ||||||
|  | @ -915,9 +915,9 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet | 	var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var autoscalingListener *actionsv1alpha1.AutoscalingListener | 	var autoscalingListener *v1alpha1.AutoscalingListener | ||||||
| 	var rootCAConfigMap *corev1.ConfigMap | 	var rootCAConfigMap *corev1.ConfigMap | ||||||
| 
 | 
 | ||||||
| 	BeforeEach(func() { | 	BeforeEach(func() { | ||||||
|  | @ -955,16 +955,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { | ||||||
| 
 | 
 | ||||||
| 		min := 1 | 		min := 1 | ||||||
| 		max := 10 | 		max := 10 | ||||||
| 		autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ | 		autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ | 			Spec: v1alpha1.AutoscalingRunnerSetSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ | 				GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ | ||||||
| 					CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ | 					CertificateFrom: &v1alpha1.TLSCertificateSource{ | ||||||
| 						ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | 						ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | ||||||
| 							LocalObjectReference: corev1.LocalObjectReference{ | 							LocalObjectReference: corev1.LocalObjectReference{ | ||||||
| 								Name: rootCAConfigMap.Name, | 								Name: rootCAConfigMap.Name, | ||||||
|  | @ -991,16 +991,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { | ||||||
| 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | 		err = k8sClient.Create(ctx, autoscalingRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		autoscalingListener = &actionsv1alpha1.AutoscalingListener{ | 		autoscalingListener = &v1alpha1.AutoscalingListener{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asl", | 				Name:      "test-asl", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.AutoscalingListenerSpec{ | 			Spec: v1alpha1.AutoscalingListenerSpec{ | ||||||
| 				GitHubConfigUrl:    "https://github.com/owner/repo", | 				GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 				GitHubConfigSecret: configSecret.Name, | 				GitHubConfigSecret: configSecret.Name, | ||||||
| 				GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ | 				GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ | ||||||
| 					CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ | 					CertificateFrom: &v1alpha1.TLSCertificateSource{ | ||||||
| 						ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | 						ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | ||||||
| 							LocalObjectReference: corev1.LocalObjectReference{ | 							LocalObjectReference: corev1.LocalObjectReference{ | ||||||
| 								Name: rootCAConfigMap.Name, | 								Name: rootCAConfigMap.Name, | ||||||
|  |  | ||||||
@@ -30,7 +30,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -759,26 +758,6 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Con
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	groupVersionIndexer := func(rawObj client.Object) []string {
-		groupVersion := v1alpha1.GroupVersion.String()
-		owner := metav1.GetControllerOf(rawObj)
-		if owner == nil {
-			return nil
-		}
-
-		// ...make sure it is owned by this controller
-		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" {
-			return nil
-		}
-
-		// ...and if so, return it
-		return []string{owner.Name}
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.AutoscalingRunnerSet{}).
 		Owns(&v1alpha1.EphemeralRunnerSet{}).

|  | @ -815,7 +815,6 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, | ||||||
| 
 | 
 | ||||||
| // SetupWithManager sets up the controller with the Manager. | // SetupWithManager sets up the controller with the Manager. | ||||||
| func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { | func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { | ||||||
| 	// TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{} |  | ||||||
| 	return ctrl.NewControllerManagedBy(mgr). | 	return ctrl.NewControllerManagedBy(mgr). | ||||||
| 		For(&v1alpha1.EphemeralRunner{}). | 		For(&v1alpha1.EphemeralRunner{}). | ||||||
| 		Owns(&corev1.Pod{}). | 		Owns(&corev1.Pod{}). | ||||||
|  |  | ||||||
|  | @ -213,9 +213,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R | ||||||
| 			// on the next batch | 			// on the next batch | ||||||
| 		case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas: | 		case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas: | ||||||
| 			count := total - ephemeralRunnerSet.Spec.Replicas | 			count := total - ephemeralRunnerSet.Spec.Replicas | ||||||
| 			if count <= 0 { |  | ||||||
| 				break |  | ||||||
| 			} |  | ||||||
| 			log.Info("Deleting ephemeral runners (scale down)", "count", count) | 			log.Info("Deleting ephemeral runners (scale down)", "count", count) | ||||||
| 			if err := r.deleteIdleEphemeralRunners( | 			if err := r.deleteIdleEphemeralRunners( | ||||||
| 				ctx, | 				ctx, | ||||||
|  | @ -574,28 +571,6 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte | ||||||
| 
 | 
 | ||||||
| // SetupWithManager sets up the controller with the Manager. | // SetupWithManager sets up the controller with the Manager. | ||||||
| func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { | func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { | ||||||
| 	// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups. |  | ||||||
| 	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string { |  | ||||||
| 		groupVersion := v1alpha1.GroupVersion.String() |  | ||||||
| 
 |  | ||||||
| 		// grab the job object, extract the owner... |  | ||||||
| 		ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner) |  | ||||||
| 		owner := metav1.GetControllerOf(ephemeralRunner) |  | ||||||
| 		if owner == nil { |  | ||||||
| 			return nil |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// ...make sure it is owned by this controller |  | ||||||
| 		if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" { |  | ||||||
| 			return nil |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// ...and if so, return it |  | ||||||
| 		return []string{owner.Name} |  | ||||||
| 	}); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return ctrl.NewControllerManagedBy(mgr). | 	return ctrl.NewControllerManagedBy(mgr). | ||||||
| 		For(&v1alpha1.EphemeralRunnerSet{}). | 		For(&v1alpha1.EphemeralRunnerSet{}). | ||||||
| 		Owns(&v1alpha1.EphemeralRunner{}). | 		Owns(&v1alpha1.EphemeralRunner{}). | ||||||
|  |  | ||||||
|  | @ -23,8 +23,7 @@ import ( | ||||||
| 	. "github.com/onsi/gomega" | 	. "github.com/onsi/gomega" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 
 | 
 | ||||||
| 	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" | 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" | ||||||
| 	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" |  | ||||||
| 	"github.com/actions/actions-runner-controller/github/actions" | 	"github.com/actions/actions-runner-controller/github/actions" | ||||||
| 	"github.com/actions/actions-runner-controller/github/actions/fake" | 	"github.com/actions/actions-runner-controller/github/actions/fake" | ||||||
| 	"github.com/actions/actions-runner-controller/github/actions/testserver" | 	"github.com/actions/actions-runner-controller/github/actions/testserver" | ||||||
|  | @ -40,7 +39,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet | 	var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 
 | 
 | ||||||
| 	BeforeEach(func() { | 	BeforeEach(func() { | ||||||
|  | @ -57,13 +56,13 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		err := controller.SetupWithManager(mgr) | 		err := controller.SetupWithManager(mgr) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to setup controller") | 		Expect(err).NotTo(HaveOccurred(), "failed to setup controller") | ||||||
| 
 | 
 | ||||||
| 		ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ | 		ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ | 			Spec: v1alpha1.EphemeralRunnerSetSpec{ | ||||||
| 				EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ | 				EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ | ||||||
| 					GitHubConfigUrl:    "https://github.com/owner/repo", | 					GitHubConfigUrl:    "https://github.com/owner/repo", | ||||||
| 					GitHubConfigSecret: configSecret.Name, | 					GitHubConfigSecret: configSecret.Name, | ||||||
| 					RunnerScaleSetId:   100, | 					RunnerScaleSetId:   100, | ||||||
|  | @ -90,7 +89,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 	Context("When creating a new EphemeralRunnerSet", func() { | 	Context("When creating a new EphemeralRunnerSet", func() { | ||||||
| 		It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() { | 		It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() { | ||||||
| 			// Check if finalizer is added | 			// Check if finalizer is added | ||||||
| 			created := new(actionsv1alpha1.EphemeralRunnerSet) | 			created := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (string, error) { | 				func() (string, error) { | ||||||
| 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | ||||||
|  | @ -108,7 +107,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if the number of ephemeral runners are stay 0 | 			// Check if the number of ephemeral runners are stay 0 | ||||||
| 			Consistently( | 			Consistently( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 					runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -122,7 +121,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if the status stay 0 | 			// Check if the status stay 0 | ||||||
| 			Consistently( | 			Consistently( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) | 					runnerSet := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) | 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -142,7 +141,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if the number of ephemeral runners are created | 			// Check if the number of ephemeral runners are created | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 					runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -176,7 +175,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if the status is updated | 			// Check if the status is updated | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) | 					runnerSet := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) | 					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -191,7 +190,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 
 | 
 | ||||||
| 	Context("When deleting a new EphemeralRunnerSet", func() { | 	Context("When deleting a new EphemeralRunnerSet", func() { | ||||||
| 		It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() { | 		It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() { | ||||||
| 			created := new(actionsv1alpha1.EphemeralRunnerSet) | 			created := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -204,7 +203,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Wait for the EphemeralRunnerSet to be scaled up | 			// Wait for the EphemeralRunnerSet to be scaled up | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 					runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -242,7 +241,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if all ephemeral runners are deleted | 			// Check if all ephemeral runners are deleted | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 					runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -256,7 +255,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Check if the EphemeralRunnerSet is deleted | 			// Check if the EphemeralRunnerSet is deleted | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					deleted := new(actionsv1alpha1.EphemeralRunnerSet) | 					deleted := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 					err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted) | 					err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						if kerrors.IsNotFound(err) { | 						if kerrors.IsNotFound(err) { | ||||||
|  | @ -275,7 +274,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 
 | 
 | ||||||
| 	Context("When a new EphemeralRunnerSet scale up and down", func() { | 	Context("When a new EphemeralRunnerSet scale up and down", func() { | ||||||
| 		It("Should scale up with patch ID 0", func() { | 		It("Should scale up with patch ID 0", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -286,7 +285,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -302,7 +301,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should scale up when patch ID changes", func() { | 		It("Should scale up when patch ID changes", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -313,7 +312,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -327,7 +326,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 				ephemeralRunnerSetTestInterval, | 				ephemeralRunnerSetTestInterval, | ||||||
| 			).Should(BeEquivalentTo(1), "1 EphemeralRunner should be created") | 			).Should(BeEquivalentTo(1), "1 EphemeralRunner should be created") | ||||||
| 
 | 
 | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -338,7 +337,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -354,7 +353,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should clean up finished ephemeral runner when scaling down", func() { | 		It("Should clean up finished ephemeral runner when scaling down", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -365,7 +364,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -390,7 +389,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			// Keep the ephemeral runner until the next patch | 			// Keep the ephemeral runner until the next patch | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -405,7 +404,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			).Should(BeEquivalentTo(2), "1 EphemeralRunner should be up") | 			).Should(BeEquivalentTo(2), "1 EphemeralRunner should be up") | ||||||
| 
 | 
 | ||||||
| 			// The listener was slower to patch the completed, but we should still have 1 running | 			// The listener was slower to patch the completed, but we should still have 1 running | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -416,7 +415,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -432,7 +431,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should keep finished ephemeral runners until patch id changes", func() { | 		It("Should keep finished ephemeral runners until patch id changes", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -443,7 +442,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -468,7 +467,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			// confirm they are not deleted | 			// confirm they are not deleted | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Consistently( | 			Consistently( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -484,7 +483,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should handle double scale up", func() { | 		It("Should handle double scale up", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -495,7 +494,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -520,7 +519,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) | 			err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -531,7 +530,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			// We should have 3 runners, and have no Succeeded ones | 			// We should have 3 runners, and have no Succeeded ones | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
|  | @ -558,7 +557,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should handle scale down without removing pending runners", func() { | 		It("Should handle scale down without removing pending runners", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -569,7 +568,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -594,7 +593,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			// Wait for these statuses to actually be updated | 			// Wait for these statuses to actually be updated | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -623,7 +622,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Succeeded phase") | 			).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Succeeded phase") | ||||||
| 
 | 
 | ||||||
| 			// Scale down to 0, while 1 is still pending. This simulates the difference between the desired and actual state | 			// Scale down to 0, while 1 is still pending. This simulates the difference between the desired and actual state | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -634,7 +633,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			// We should have 1 runner up and pending | 			// We should have 1 runner up and pending | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
|  | @ -678,7 +677,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should kill pending and running runners if they are up for some reason and the batch contains no jobs", func() { | 		It("Should kill pending and running runners if they are up for some reason and the batch contains no jobs", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -689,7 +688,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -715,7 +714,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			// Wait for these statuses to actually be updated | 			// Wait for these statuses to actually be updated | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -748,7 +747,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 
 | 
 | ||||||
| 			// Scale down to 0 with patch ID 0. This forces the scale down to self correct on empty batch | 			// Scale down to 0 with patch ID 0. This forces the scale down to self correct on empty batch | ||||||
| 
 | 
 | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -759,7 +758,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Consistently( | 			Consistently( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -786,7 +785,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") | ||||||
| 
 | 
 | ||||||
| 			// Now, eventually, they should be deleted | 			// Now, eventually, they should be deleted | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -803,7 +802,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should replace finished ephemeral runners with new ones", func() { | 		It("Should replace finished ephemeral runners with new ones", func() { | ||||||
| 			ers := new(actionsv1alpha1.EphemeralRunnerSet) | 			ers := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -814,7 +813,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -841,7 +840,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 
 | 
 | ||||||
| 			// Wait for these statuses to actually be updated | 			// Wait for these statuses to actually be updated | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -874,7 +873,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			// Now, let's simulate replacement. The desired count is still 2. | 			// Now, let's simulate replacement. The desired count is still 2. | ||||||
| 			// This simulates that we got 1 job assigned, and 1 job completed. | 			// This simulates that we got 1 job assigned, and 1 job completed. | ||||||
| 
 | 
 | ||||||
| 			ers = new(actionsv1alpha1.EphemeralRunnerSet) | 			ers = new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | 			err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -885,7 +884,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | 			err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 			runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -911,7 +910,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		It("Should update status on Ephemeral Runner state changes", func() { | 		It("Should update status on Ephemeral Runner state changes", func() { | ||||||
| 			created := new(actionsv1alpha1.EphemeralRunnerSet) | 			created := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() error { | 				func() error { | ||||||
| 					return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | 					return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) | ||||||
|  | @ -926,7 +925,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 			err := k8sClient.Update(ctx, updated) | 			err := k8sClient.Update(ctx, updated) | ||||||
| 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count") | 			Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count") | ||||||
| 
 | 
 | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (bool, error) { | 				func() (bool, error) { | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
|  | @ -1036,7 +1035,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { | ||||||
| 
 | 
 | ||||||
| 			Eventually( | 			Eventually( | ||||||
| 				func() (int, error) { | 				func() (int, error) { | ||||||
| 					runnerList = new(actionsv1alpha1.EphemeralRunnerList) | 					runnerList = new(v1alpha1.EphemeralRunnerList) | ||||||
| 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 					err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 						return -1, err | 						return -1, err | ||||||
|  | @ -1091,7 +1090,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet | 	var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 
 | 
 | ||||||
| 	BeforeEach(func() { | 	BeforeEach(func() { | ||||||
|  | @ -1126,14 +1125,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 		err := k8sClient.Create(ctx, secretCredentials) | 		err := k8sClient.Create(ctx, secretCredentials) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") | 		Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") | ||||||
| 
 | 
 | ||||||
| 		ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ | 		ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ | 			Spec: v1alpha1.EphemeralRunnerSetSpec{ | ||||||
| 				Replicas: 1, | 				Replicas: 1, | ||||||
| 				EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ | 				EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ | ||||||
| 					GitHubConfigUrl:    "http://example.com/owner/repo", | 					GitHubConfigUrl:    "http://example.com/owner/repo", | ||||||
| 					GitHubConfigSecret: configSecret.Name, | 					GitHubConfigSecret: configSecret.Name, | ||||||
| 					RunnerScaleSetId:   100, | 					RunnerScaleSetId:   100, | ||||||
|  | @ -1193,7 +1192,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 		).Should(Succeed(), "compiled / flattened proxy secret should exist") | 		).Should(Succeed(), "compiled / flattened proxy secret should exist") | ||||||
| 
 | 
 | ||||||
| 		Eventually(func(g Gomega) { | 		Eventually(func(g Gomega) { | ||||||
| 			runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 			runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 			g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners") | 			g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners") | ||||||
| 
 | 
 | ||||||
|  | @ -1211,7 +1210,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 		// Set pods to PodSucceeded to simulate an actual EphemeralRunner stopping | 		// Set pods to PodSucceeded to simulate an actual EphemeralRunner stopping | ||||||
| 		Eventually( | 		Eventually( | ||||||
| 			func(g Gomega) (int, error) { | 			func(g Gomega) (int, error) { | ||||||
| 				runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 				runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 				err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 				err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 				if err != nil { | 				if err != nil { | ||||||
| 					return -1, err | 					return -1, err | ||||||
|  | @ -1293,14 +1292,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 			proxy.Close() | 			proxy.Close() | ||||||
| 		}) | 		}) | ||||||
| 
 | 
 | ||||||
| 		ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ | 		ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ | 			Spec: v1alpha1.EphemeralRunnerSetSpec{ | ||||||
| 				Replicas: 1, | 				Replicas: 1, | ||||||
| 				EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ | 				EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ | ||||||
| 					GitHubConfigUrl:    "http://example.com/owner/repo", | 					GitHubConfigUrl:    "http://example.com/owner/repo", | ||||||
| 					GitHubConfigSecret: configSecret.Name, | 					GitHubConfigSecret: configSecret.Name, | ||||||
| 					RunnerScaleSetId:   100, | 					RunnerScaleSetId:   100, | ||||||
|  | @ -1327,7 +1326,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 		err = k8sClient.Create(ctx, ephemeralRunnerSet) | 		err = k8sClient.Create(ctx, ephemeralRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 		runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 		Eventually(func() (int, error) { | 		Eventually(func() (int, error) { | ||||||
| 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
|  | @ -1346,7 +1345,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( | ||||||
| 		err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) | 		err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") | 		Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") | ||||||
| 
 | 
 | ||||||
| 		runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) | 		runnerSet := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 		err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet) | 		err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  | @ -1369,7 +1368,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( | ||||||
| 	var ctx context.Context | 	var ctx context.Context | ||||||
| 	var mgr ctrl.Manager | 	var mgr ctrl.Manager | ||||||
| 	var autoscalingNS *corev1.Namespace | 	var autoscalingNS *corev1.Namespace | ||||||
| 	var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet | 	var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet | ||||||
| 	var configSecret *corev1.Secret | 	var configSecret *corev1.Secret | ||||||
| 	var rootCAConfigMap *corev1.ConfigMap | 	var rootCAConfigMap *corev1.ConfigMap | ||||||
| 
 | 
 | ||||||
|  | @ -1431,17 +1430,17 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( | ||||||
| 		server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} | 		server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} | ||||||
| 		server.StartTLS() | 		server.StartTLS() | ||||||
| 
 | 
 | ||||||
| 		ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ | 		ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ | ||||||
| 			ObjectMeta: metav1.ObjectMeta{ | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
| 				Name:      "test-asrs", | 				Name:      "test-asrs", | ||||||
| 				Namespace: autoscalingNS.Name, | 				Namespace: autoscalingNS.Name, | ||||||
| 			}, | 			}, | ||||||
| 			Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ | 			Spec: v1alpha1.EphemeralRunnerSetSpec{ | ||||||
| 				Replicas: 1, | 				Replicas: 1, | ||||||
| 				EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ | 				EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ | ||||||
| 					GitHubConfigUrl:    server.ConfigURLForOrg("my-org"), | 					GitHubConfigUrl:    server.ConfigURLForOrg("my-org"), | ||||||
| 					GitHubConfigSecret: configSecret.Name, | 					GitHubConfigSecret: configSecret.Name, | ||||||
| 					GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ | 					GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ | ||||||
| 						CertificateFrom: &v1alpha1.TLSCertificateSource{ | 						CertificateFrom: &v1alpha1.TLSCertificateSource{ | ||||||
| 							ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | 							ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ | ||||||
| 								LocalObjectReference: corev1.LocalObjectReference{ | 								LocalObjectReference: corev1.LocalObjectReference{ | ||||||
|  | @ -1469,7 +1468,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( | ||||||
| 		err = k8sClient.Create(ctx, ephemeralRunnerSet) | 		err = k8sClient.Create(ctx, ephemeralRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
| 		runnerList := new(actionsv1alpha1.EphemeralRunnerList) | 		runnerList := new(v1alpha1.EphemeralRunnerList) | ||||||
| 		Eventually(func() (int, error) { | 		Eventually(func() (int, error) { | ||||||
| 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | 			err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
|  | @ -1491,7 +1490,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( | ||||||
| 		err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) | 		err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") | 		Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") | ||||||
| 
 | 
 | ||||||
| 		currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) | 		currentRunnerSet := new(v1alpha1.EphemeralRunnerSet) | ||||||
| 		err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet) | 		err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet) | ||||||
| 		Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | 		Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -18,6 +18,9 @@ const defaultGitHubToken = "gh_token" | ||||||
| 
 | 
 | ||||||
| func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) { | func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) { | ||||||
| 	for _, mgr := range append([]manager.Manager{first}, others...) { | 	for _, mgr := range append([]manager.Manager{first}, others...) { | ||||||
|  | 		if err := SetupIndexers(mgr); err != nil { | ||||||
|  | 			t.Fatalf("failed to setup indexers: %v", err) | ||||||
|  | 		} | ||||||
| 		ctx, cancel := context.WithCancel(context.Background()) | 		ctx, cancel := context.WithCancel(context.Background()) | ||||||
| 
 | 
 | ||||||
| 		g, ctx := errgroup.WithContext(ctx) | 		g, ctx := errgroup.WithContext(ctx) | ||||||
|  |  | ||||||
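With the field-index registration moved out of the individual SetupWithManager methods, each manager now needs the shared indexers installed before it starts; the test harness above does this by calling SetupIndexers(mgr) per manager. A minimal sketch of the equivalent wiring in a manager entrypoint follows; the import alias, the ctrl.Options contents, and the reconciler fields are assumptions for illustration and are not taken from this commit:

	package main

	import (
		ctrl "sigs.k8s.io/controller-runtime"

		actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com" // assumed import path
	)

	func main() {
		// Options would normally carry a Scheme with the v1alpha1 types registered.
		mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
		if err != nil {
			panic(err)
		}

		// Register the shared owner-reference field indexes exactly once,
		// before any reconciler that relies on them is set up.
		if err := actionsgithubcom.SetupIndexers(mgr); err != nil {
			panic(err)
		}

		// Reconcilers are wired as before; they no longer call IndexField themselves.
		if err := (&actionsgithubcom.EphemeralRunnerSetReconciler{ /* client, scheme, ... */ }).SetupWithManager(mgr); err != nil {
			panic(err)
		}

		if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
			panic(err)
		}
	}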
|  | @ -0,0 +1,71 @@ | ||||||
|  | package actionsgithubcom | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"slices" | ||||||
|  | 
 | ||||||
|  | 	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" | ||||||
|  | 	corev1 "k8s.io/api/core/v1" | ||||||
|  | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
|  | 	ctrl "sigs.k8s.io/controller-runtime" | ||||||
|  | 	"sigs.k8s.io/controller-runtime/pkg/client" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func SetupIndexers(mgr ctrl.Manager) error { | ||||||
|  | 	if err := mgr.GetFieldIndexer().IndexField( | ||||||
|  | 		context.Background(), | ||||||
|  | 		&corev1.Pod{}, | ||||||
|  | 		resourceOwnerKey, | ||||||
|  | 		newGroupVersionOwnerKindIndexer("AutoscalingListener", "EphemeralRunner"), | ||||||
|  | 	); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := mgr.GetFieldIndexer().IndexField( | ||||||
|  | 		context.Background(), | ||||||
|  | 		&corev1.ServiceAccount{}, | ||||||
|  | 		resourceOwnerKey, | ||||||
|  | 		newGroupVersionOwnerKindIndexer("AutoscalingListener"), | ||||||
|  | 	); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := mgr.GetFieldIndexer().IndexField( | ||||||
|  | 		context.Background(), | ||||||
|  | 		&v1alpha1.EphemeralRunnerSet{}, | ||||||
|  | 		resourceOwnerKey, | ||||||
|  | 		newGroupVersionOwnerKindIndexer("AutoscalingRunnerSet"), | ||||||
|  | 	); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := mgr.GetFieldIndexer().IndexField( | ||||||
|  | 		context.Background(), | ||||||
|  | 		&v1alpha1.EphemeralRunner{}, | ||||||
|  | 		resourceOwnerKey, | ||||||
|  | 		newGroupVersionOwnerKindIndexer("EphemeralRunnerSet"), | ||||||
|  | 	); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newGroupVersionOwnerKindIndexer(ownerKind string, otherOwnerKinds ...string) client.IndexerFunc { | ||||||
|  | 	owners := append([]string{ownerKind}, otherOwnerKinds...) | ||||||
|  | 	return func(o client.Object) []string { | ||||||
|  | 		groupVersion := v1alpha1.GroupVersion.String() | ||||||
|  | 		owner := metav1.GetControllerOfNoCopy(o) | ||||||
|  | 		if owner == nil { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// ...make sure it is owned by this controller
 | ||||||
|  | 		if owner.APIVersion != groupVersion || !slices.Contains(owners, owner.Kind) { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// ...and if so, return it
 | ||||||
|  | 		return []string{owner.Name} | ||||||
|  | 	} | ||||||
|  | } | ||||||
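A brief sketch of how the field index registered by the new `SetupIndexers` is typically consumed by the reconcilers. This is illustrative only: `listOwnedPods` is a hypothetical helper, and `resourceOwnerKey` is assumed to be the package constant passed to `IndexField` above.

```go
package actionsgithubcom

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listOwnedPods is a hypothetical helper (not part of this change) showing how
// the resourceOwnerKey field index is consumed: the List call is served from
// the manager's cache and filtered by the index, instead of scanning every Pod
// in the namespace and checking owner references by hand.
func listOwnedPods(ctx context.Context, c client.Client, namespace, ownerName string) (*corev1.PodList, error) {
	var pods corev1.PodList
	if err := c.List(ctx, &pods,
		client.InNamespace(namespace),
		client.MatchingFields{resourceOwnerKey: ownerName},
	); err != nil {
		return nil, err
	}
	return &pods, nil
}
```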
|  | @ -85,13 +85,13 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. | ||||||
| 		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners | 		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	labels := map[string]string{ | 	labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{ | ||||||
| 		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, | 		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, | ||||||
| 		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name, | 		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name, | ||||||
| 		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf, | 		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf, | ||||||
| 		LabelKeyKubernetesComponent:     "runner-scale-set-listener", | 		LabelKeyKubernetesComponent:     "runner-scale-set-listener", | ||||||
| 		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], | 		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], | ||||||
| 	} | 	}) | ||||||
| 
 | 
 | ||||||
| 	annotations := map[string]string{ | 	annotations := map[string]string{ | ||||||
| 		annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), | 		annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), | ||||||
|  | @ -411,10 +411,10 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      scaleSetListenerServiceAccountName(autoscalingListener), | 			Name:      scaleSetListenerServiceAccountName(autoscalingListener), | ||||||
| 			Namespace: autoscalingListener.Namespace, | 			Namespace: autoscalingListener.Namespace, | ||||||
| 			Labels: map[string]string{ | 			Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ | ||||||
| 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | ||||||
| 			}, | 			}), | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | @ -426,13 +426,13 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1. | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      scaleSetListenerRoleName(autoscalingListener), | 			Name:      scaleSetListenerRoleName(autoscalingListener), | ||||||
| 			Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 			Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 			Labels: map[string]string{ | 			Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ | ||||||
| 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | ||||||
| 				labelKeyListenerNamespace:       autoscalingListener.Namespace, | 				labelKeyListenerNamespace:       autoscalingListener.Namespace, | ||||||
| 				labelKeyListenerName:            autoscalingListener.Name, | 				labelKeyListenerName:            autoscalingListener.Name, | ||||||
| 				"role-policy-rules-hash":        rulesHash, | 				"role-policy-rules-hash":        rulesHash, | ||||||
| 			}, | 			}), | ||||||
| 		}, | 		}, | ||||||
| 		Rules: rules, | 		Rules: rules, | ||||||
| 	} | 	} | ||||||
|  | @ -460,14 +460,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1 | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      scaleSetListenerRoleName(autoscalingListener), | 			Name:      scaleSetListenerRoleName(autoscalingListener), | ||||||
| 			Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 			Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 			Labels: map[string]string{ | 			Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ | ||||||
| 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | ||||||
| 				labelKeyListenerNamespace:       autoscalingListener.Namespace, | 				labelKeyListenerNamespace:       autoscalingListener.Namespace, | ||||||
| 				labelKeyListenerName:            autoscalingListener.Name, | 				labelKeyListenerName:            autoscalingListener.Name, | ||||||
| 				"role-binding-role-ref-hash":    roleRefHash, | 				"role-binding-role-ref-hash":    roleRefHash, | ||||||
| 				"role-binding-subject-hash":     subjectHash, | 				"role-binding-subject-hash":     subjectHash, | ||||||
| 			}, | 			}), | ||||||
| 		}, | 		}, | ||||||
| 		RoleRef:  roleRef, | 		RoleRef:  roleRef, | ||||||
| 		Subjects: subjects, | 		Subjects: subjects, | ||||||
|  | @ -483,11 +483,11 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      scaleSetListenerSecretMirrorName(autoscalingListener), | 			Name:      scaleSetListenerSecretMirrorName(autoscalingListener), | ||||||
| 			Namespace: autoscalingListener.Namespace, | 			Namespace: autoscalingListener.Namespace, | ||||||
| 			Labels: map[string]string{ | 			Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ | ||||||
| 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | 				LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, | ||||||
| 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | 				LabelKeyGitHubScaleSetName:      autoscalingListener.Spec.AutoscalingRunnerSetName, | ||||||
| 				"secret-data-hash":              dataHash, | 				"secret-data-hash":              dataHash, | ||||||
| 			}, | 			}), | ||||||
| 		}, | 		}, | ||||||
| 		Data: secret.DeepCopy().Data, | 		Data: secret.DeepCopy().Data, | ||||||
| 	} | 	} | ||||||
|  | @ -502,13 +502,13 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A | ||||||
| 	} | 	} | ||||||
| 	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() | 	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() | ||||||
| 
 | 
 | ||||||
| 	labels := map[string]string{ | 	labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{ | ||||||
| 		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf, | 		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf, | ||||||
| 		LabelKeyKubernetesComponent:     "runner-set", | 		LabelKeyKubernetesComponent:     "runner-set", | ||||||
| 		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], | 		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], | ||||||
| 		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name, | 		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name, | ||||||
| 		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, | 		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, | ||||||
| 	} | 	}) | ||||||
| 
 | 
 | ||||||
| 	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil { | 	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil { | ||||||
| 		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) | 		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) | ||||||
|  | @ -547,18 +547,14 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A | ||||||
| 
 | 
 | ||||||
| func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { | func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { | ||||||
| 	labels := make(map[string]string) | 	labels := make(map[string]string) | ||||||
| 	for _, key := range commonLabelKeys { | 	for k, v := range ephemeralRunnerSet.Labels { | ||||||
| 		switch key { | 		if k == LabelKeyKubernetesComponent { | ||||||
| 		case LabelKeyKubernetesComponent: | 			labels[k] = "runner" | ||||||
| 			labels[key] = "runner" | 		} else { | ||||||
| 		default: | 			labels[k] = v | ||||||
| 			v, ok := ephemeralRunnerSet.Labels[key] |  | ||||||
| 			if !ok { |  | ||||||
| 				continue |  | ||||||
| 			} |  | ||||||
| 			labels[key] = v |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
| 	annotations := make(map[string]string) | 	annotations := make(map[string]string) | ||||||
| 	for key, val := range ephemeralRunnerSet.Annotations { | 	for key, val := range ephemeralRunnerSet.Annotations { | ||||||
| 		annotations[key] = val | 		annotations[key] = val | ||||||
|  | @ -751,3 +747,17 @@ func trimLabelValue(val string) string { | ||||||
| 	} | 	} | ||||||
| 	return val | 	return val | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func mergeLabels(base, overwrite map[string]string) map[string]string { | ||||||
|  | 	mergedLabels := map[string]string{} | ||||||
|  | 
 | ||||||
|  | 	for k, v := range base { | ||||||
|  | 		mergedLabels[k] = v | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for k, v := range overwrite { | ||||||
|  | 		mergedLabels[k] = v | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return mergedLabels | ||||||
|  | } | ||||||
|  |  | ||||||
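For clarity, a minimal sketch of the merge semantics the resource builder now relies on, assuming the `mergeLabels` helper and the `LabelKeyKubernetesComponent` constant shown above: labels on the parent resource are propagated unchanged, and the controller-managed keys passed as the second argument win on conflict. The values below are illustrative only.

```go
package actionsgithubcom

import "fmt"

// Example_mergeLabels is an illustrative sketch (not part of this change) of
// the merge semantics: arbitrary user labels are carried over as-is, while
// controller-managed keys passed in the overwrite map take precedence.
func Example_mergeLabels() {
	merged := mergeLabels(
		map[string]string{
			"team":                      "platform",      // arbitrary user label, propagated as-is
			LabelKeyKubernetesComponent: "user-supplied", // managed key set by the user
		},
		map[string]string{
			LabelKeyKubernetesComponent: "runner-scale-set-listener", // controller-managed value wins
		},
	)
	fmt.Println(merged["team"], merged[LabelKeyKubernetesComponent])
	// Output: platform runner-scale-set-listener
}
```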
|  | @ -21,6 +21,7 @@ func TestLabelPropagation(t *testing.T) { | ||||||
| 			Labels: map[string]string{ | 			Labels: map[string]string{ | ||||||
| 				LabelKeyKubernetesPartOf:  labelValueKubernetesPartOf, | 				LabelKeyKubernetesPartOf:  labelValueKubernetesPartOf, | ||||||
| 				LabelKeyKubernetesVersion: "0.2.0", | 				LabelKeyKubernetesVersion: "0.2.0", | ||||||
|  | 				"arbitrary-label":         "random-value", | ||||||
| 			}, | 			}, | ||||||
| 			Annotations: map[string]string{ | 			Annotations: map[string]string{ | ||||||
| 				runnerScaleSetIdAnnotationKey:         "1", | 				runnerScaleSetIdAnnotationKey:         "1", | ||||||
|  | @ -47,6 +48,7 @@ func TestLabelPropagation(t *testing.T) { | ||||||
| 	assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository]) | 	assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository]) | ||||||
| 	assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]) | 	assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]) | ||||||
| 	assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]) | 	assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]) | ||||||
|  | 	assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], ephemeralRunnerSet.Labels["arbitrary-label"]) | ||||||
| 
 | 
 | ||||||
| 	listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) | 	listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) | ||||||
| 	require.NoError(t, err) | 	require.NoError(t, err) | ||||||
|  | @ -59,6 +61,7 @@ func TestLabelPropagation(t *testing.T) { | ||||||
| 	assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) | 	assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) | ||||||
| 	assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization]) | 	assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization]) | ||||||
| 	assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository]) | 	assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository]) | ||||||
|  | 	assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"]) | ||||||
| 
 | 
 | ||||||
| 	listenerServiceAccount := &corev1.ServiceAccount{ | 	listenerServiceAccount := &corev1.ServiceAccount{ | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
|  |  | ||||||
|  | @ -43,6 +43,14 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h | ||||||
| 
 | 
 | ||||||
| ## Changelog | ## Changelog | ||||||
| 
 | 
 | ||||||
|  | ### v0.9.2 | ||||||
|  | 
 | ||||||
|  | 1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529) | ||||||
|  | 1. Re-use the last desired patch on empty batch [#3453](https://github.com/actions/actions-runner-controller/pull/3453) | ||||||
|  | 1. Extract single place to set up indexers [#3454](https://github.com/actions/actions-runner-controller/pull/3454) | ||||||
|  | 1. Include controller version in logs [#3473](https://github.com/actions/actions-runner-controller/pull/3473) | ||||||
|  | 1. Propagate arbitrary labels from runnersets to all created resources [#3157](https://github.com/actions/actions-runner-controller/pull/3157) | ||||||
|  | 
 | ||||||
| ### v0.9.1 | ### v0.9.1 | ||||||
| 
 | 
 | ||||||
| #### Major changes | #### Major changes | ||||||
|  |  | ||||||
							
								
								
									
go.mod (8 changed lines)
							|  | @ -18,16 +18,16 @@ require ( | ||||||
| 	github.com/kelseyhightower/envconfig v1.4.0 | 	github.com/kelseyhightower/envconfig v1.4.0 | ||||||
| 	github.com/onsi/ginkgo v1.16.5 | 	github.com/onsi/ginkgo v1.16.5 | ||||||
| 	github.com/onsi/ginkgo/v2 v2.17.1 | 	github.com/onsi/ginkgo/v2 v2.17.1 | ||||||
| 	github.com/onsi/gomega v1.30.0 | 	github.com/onsi/gomega v1.33.0 | ||||||
| 	github.com/pkg/errors v0.9.1 | 	github.com/pkg/errors v0.9.1 | ||||||
| 	github.com/prometheus/client_golang v1.17.0 | 	github.com/prometheus/client_golang v1.17.0 | ||||||
| 	github.com/stretchr/testify v1.9.0 | 	github.com/stretchr/testify v1.9.0 | ||||||
| 	github.com/teambition/rrule-go v1.8.2 | 	github.com/teambition/rrule-go v1.8.2 | ||||||
| 	go.uber.org/multierr v1.11.0 | 	go.uber.org/multierr v1.11.0 | ||||||
| 	go.uber.org/zap v1.26.0 | 	go.uber.org/zap v1.27.0 | ||||||
| 	golang.org/x/net v0.24.0 | 	golang.org/x/net v0.24.0 | ||||||
| 	golang.org/x/oauth2 v0.15.0 | 	golang.org/x/oauth2 v0.19.0 | ||||||
| 	golang.org/x/sync v0.6.0 | 	golang.org/x/sync v0.7.0 | ||||||
| 	gomodules.xyz/jsonpatch/v2 v2.4.0 | 	gomodules.xyz/jsonpatch/v2 v2.4.0 | ||||||
| 	gopkg.in/yaml.v2 v2.4.0 | 	gopkg.in/yaml.v2 v2.4.0 | ||||||
| 	k8s.io/api v0.28.4 | 	k8s.io/api v0.28.4 | ||||||
|  |  | ||||||
							
								
								
									
go.sum (20 changed lines)
							|  | @ -173,8 +173,8 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8 | ||||||
| github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= | github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= | ||||||
| github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= | ||||||
| github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= | ||||||
| github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= | github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= | ||||||
| github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= | github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= | ||||||
| github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= | ||||||
| github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | ||||||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | ||||||
|  | @ -221,12 +221,12 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX | ||||||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||||
| github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= | ||||||
| go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= | ||||||
| go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= | ||||||
| go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | ||||||
| go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | ||||||
| go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= | ||||||
| go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= | ||||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||||
|  | @ -255,16 +255,16 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= | ||||||
| golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= | golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= | ||||||
| golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= | golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= | ||||||
| golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= | golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= | ||||||
| golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= | golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= | ||||||
| golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= | golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= | ||||||
| golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= | golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= | ||||||
| golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | ||||||
| golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||||
| golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||||
|  |  | ||||||
							
								
								
									
main.go (14 changed lines)
							|  | @ -239,6 +239,10 @@ func main() { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if autoScalingRunnerSetOnly { | 	if autoScalingRunnerSetOnly { | ||||||
|  | 		if err := actionsgithubcom.SetupIndexers(mgr); err != nil { | ||||||
|  | 			log.Error(err, "unable to setup indexers") | ||||||
|  | 			os.Exit(1) | ||||||
|  | 		} | ||||||
| 		managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE") | 		managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE") | ||||||
| 		if managerImage == "" { | 		if managerImage == "" { | ||||||
| 			log.Error(err, "unable to obtain listener image") | 			log.Error(err, "unable to obtain listener image") | ||||||
|  | @ -256,7 +260,7 @@ func main() { | ||||||
| 
 | 
 | ||||||
| 		if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ | 		if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ | ||||||
| 			Client:                             mgr.GetClient(), | 			Client:                             mgr.GetClient(), | ||||||
| 			Log:                                log.WithName("AutoscalingRunnerSet"), | 			Log:                                log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version), | ||||||
| 			Scheme:                             mgr.GetScheme(), | 			Scheme:                             mgr.GetScheme(), | ||||||
| 			ControllerNamespace:                managerNamespace, | 			ControllerNamespace:                managerNamespace, | ||||||
| 			DefaultRunnerScaleSetListenerImage: managerImage, | 			DefaultRunnerScaleSetListenerImage: managerImage, | ||||||
|  | @ -270,7 +274,7 @@ func main() { | ||||||
| 
 | 
 | ||||||
| 		if err = (&actionsgithubcom.EphemeralRunnerReconciler{ | 		if err = (&actionsgithubcom.EphemeralRunnerReconciler{ | ||||||
| 			Client:        mgr.GetClient(), | 			Client:        mgr.GetClient(), | ||||||
| 			Log:           log.WithName("EphemeralRunner"), | 			Log:           log.WithName("EphemeralRunner").WithValues("version", build.Version), | ||||||
| 			Scheme:        mgr.GetScheme(), | 			Scheme:        mgr.GetScheme(), | ||||||
| 			ActionsClient: actionsMultiClient, | 			ActionsClient: actionsMultiClient, | ||||||
| 		}).SetupWithManager(mgr); err != nil { | 		}).SetupWithManager(mgr); err != nil { | ||||||
|  | @ -280,7 +284,7 @@ func main() { | ||||||
| 
 | 
 | ||||||
| 		if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ | 		if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ | ||||||
| 			Client:         mgr.GetClient(), | 			Client:         mgr.GetClient(), | ||||||
| 			Log:            log.WithName("EphemeralRunnerSet"), | 			Log:            log.WithName("EphemeralRunnerSet").WithValues("version", build.Version), | ||||||
| 			Scheme:         mgr.GetScheme(), | 			Scheme:         mgr.GetScheme(), | ||||||
| 			ActionsClient:  actionsMultiClient, | 			ActionsClient:  actionsMultiClient, | ||||||
| 			PublishMetrics: metricsAddr != "0", | 			PublishMetrics: metricsAddr != "0", | ||||||
|  | @ -291,7 +295,7 @@ func main() { | ||||||
| 
 | 
 | ||||||
| 		if err = (&actionsgithubcom.AutoscalingListenerReconciler{ | 		if err = (&actionsgithubcom.AutoscalingListenerReconciler{ | ||||||
| 			Client:                  mgr.GetClient(), | 			Client:                  mgr.GetClient(), | ||||||
| 			Log:                     log.WithName("AutoscalingListener"), | 			Log:                     log.WithName("AutoscalingListener").WithValues("version", build.Version), | ||||||
| 			Scheme:                  mgr.GetScheme(), | 			Scheme:                  mgr.GetScheme(), | ||||||
| 			ListenerMetricsAddr:     listenerMetricsAddr, | 			ListenerMetricsAddr:     listenerMetricsAddr, | ||||||
| 			ListenerMetricsEndpoint: listenerMetricsEndpoint, | 			ListenerMetricsEndpoint: listenerMetricsEndpoint, | ||||||
|  | @ -441,7 +445,7 @@ func main() { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	log.Info("starting manager") | 	log.Info("starting manager", "version", build.Version) | ||||||
| 	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { | 	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { | ||||||
| 		log.Error(err, "problem running manager") | 		log.Error(err, "problem running manager") | ||||||
| 		os.Exit(1) | 		os.Exit(1) | ||||||
|  |  | ||||||
|  | @ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless | ||||||
| OS_IMAGE ?= ubuntu-22.04 | OS_IMAGE ?= ubuntu-22.04 | ||||||
| TARGETPLATFORM ?= $(shell arch) | TARGETPLATFORM ?= $(shell arch) | ||||||
| 
 | 
 | ||||||
| RUNNER_VERSION ?= 2.315.0 | RUNNER_VERSION ?= 2.316.1 | ||||||
| RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0 | RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0 | ||||||
| DOCKER_VERSION ?= 24.0.7 | DOCKER_VERSION ?= 24.0.7 | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,2 +1,2 @@ | ||||||
| RUNNER_VERSION=2.315.0 | RUNNER_VERSION=2.316.1 | ||||||
| RUNNER_CONTAINER_HOOKS_VERSION=0.6.0 | RUNNER_CONTAINER_HOOKS_VERSION=0.6.0 | ||||||
|  | @ -36,7 +36,7 @@ var ( | ||||||
| 
 | 
 | ||||||
| 	testResultCMNamePrefix = "test-result-" | 	testResultCMNamePrefix = "test-result-" | ||||||
| 
 | 
 | ||||||
| 	RunnerVersion               = "2.315.0" | 	RunnerVersion               = "2.316.1" | ||||||
| 	RunnerContainerHooksVersion = "0.6.0" | 	RunnerContainerHooksVersion = "0.6.0" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  |  | ||||||