From 963ae48a3fb2d98092e017d56b44f0658ce31ff3 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 16 Apr 2024 12:55:25 +0200 Subject: [PATCH 01/19] Include self correction on empty batch and avoid removing pending runners when cluster is busy (#3426) --- cmd/ghalistener/listener/listener.go | 5 + cmd/ghalistener/worker/worker.go | 8 +- .../autoscalingrunnerset_controller.go | 1 + .../ephemeralrunnerset_controller.go | 14 +- .../ephemeralrunnerset_controller_test.go | 766 +++++++++++++----- 5 files changed, 566 insertions(+), 228 deletions(-) diff --git a/cmd/ghalistener/listener/listener.go b/cmd/ghalistener/listener/listener.go index c9fc6801..56da0a8f 100644 --- a/cmd/ghalistener/listener/listener.go +++ b/cmd/ghalistener/listener/listener.go @@ -164,6 +164,11 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error { } if msg == nil { + _, err := handler.HandleDesiredRunnerCount(ctx, 0, 0) + if err != nil { + return fmt.Errorf("handling nil message failed: %w", err) + } + continue } diff --git a/cmd/ghalistener/worker/worker.go b/cmd/ghalistener/worker/worker.go index 36227535..25fb90e1 100644 --- a/cmd/ghalistener/worker/worker.go +++ b/cmd/ghalistener/worker/worker.go @@ -177,12 +177,12 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo "jobsCompleted", jobsCompleted, } - if w.lastPatch == targetRunnerCount && jobsCompleted == 0 { - w.logger.Info("Skipping patch", logValues...) - return targetRunnerCount, nil + if count == 0 && jobsCompleted == 0 { + w.lastPatchID = 0 + } else { + w.lastPatchID++ } - w.lastPatchID++ w.lastPatch = targetRunnerCount original, err := json.Marshal( diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 1d476e37..2ed654e8 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -277,6 +277,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl // need to scale down to 0 err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) { obj.Spec.Replicas = 0 + obj.Spec.PatchID = 0 }) if err != nil { log.Error(err, "Failed to patch runner set to set desired count to 0") diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 12a22f1d..91319de8 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -197,7 +197,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R log.Error(err, "failed to cleanup finished ephemeral runners") } }() - log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas) switch { case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up @@ -208,8 +207,16 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, err } - case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario. + case ephemeralRunnerSet.Spec.PatchID > 0 && total >= ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario. + // If ephemeral runner did not yet update the phase to succeeded, but the scale down + // request is issued, we should ignore the scale down request. 
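+		// (PatchID > 0 indicates a regular listener patch — the listener only resets it to 0 on an
+		// empty batch — so pending or running runners are intentionally left in place here rather
+		// than being removed while the cluster is busy.)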
+ // Eventually, the ephemeral runner will be cleaned up on the next patch request, which happens + // on the next batch + case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas: count := total - ephemeralRunnerSet.Spec.Replicas + if count <= 0 { + break + } log.Info("Deleting ephemeral runners (scale down)", "count", count) if err := r.deleteIdleEphemeralRunners( ctx, @@ -428,6 +435,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep // When this happens, the next reconcile loop will try to delete the remaining ephemeral runners // after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates. func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error { + if count <= 0 { + return nil + } runners := newEphemeralRunnerStepper(pendingEphemeralRunners, runningEphemeralRunners) if runners.len() == 0 { log.Info("No pending or running ephemeral runners running at this time for scale down") diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 271a3528..79ad2d6e 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -4,7 +4,6 @@ import ( "context" "crypto/tls" "encoding/base64" - "errors" "fmt" "net/http" "net/http/httptest" @@ -275,21 +274,18 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) Context("When a new EphemeralRunnerSet scale up and down", func() { - It("Should scale only on patch ID change", func() { - created := new(actionsv1alpha1.EphemeralRunnerSet) - err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) + It("Should scale up with patch ID 0", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") - patchID := 1 - - // Scale up the EphemeralRunnerSet - updated := created.DeepCopy() + updated := ers.DeepCopy() updated.Spec.Replicas = 5 - updated.Spec.PatchID = patchID - err = k8sClient.Update(ctx, updated) + updated.Spec.PatchID = 0 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - // Wait for the EphemeralRunnerSet to be scaled up runnerList := new(actionsv1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { @@ -298,110 +294,282 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } - // Set status to simulate a configured EphemeralRunner - refetch := false - for i, runner := range runnerList.Items { - if runner.Status.RunnerId == 0 { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - refetch = true - } - } - - if refetch { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } - } - return 
len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval, ).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created") + }) - // Mark one of the EphemeralRunner as finished - finishedRunner := runnerList.Items[4].DeepCopy() - finishedRunner.Status.Phase = corev1.PodSucceeded - err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4])) + It("Should scale up when patch ID changes", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 1 + updated.Spec.PatchID = 0 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "1 EphemeralRunner should be created") + + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated = ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + }) + + It("Should clean up finished ephemeral runner when scaling down", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - // Wait for the finished EphemeralRunner to be set to succeeded + updatedRunner = runnerList.Items[1].DeepCopy() + 
updatedRunner.Status.Phase = corev1.PodRunning + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Keep the ephemeral runner until the next patch + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "1 EphemeralRunner should be up") + + // The listener was slower to patch the completed, but we should still have 1 running + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated = ers.DeepCopy() + updated.Spec.Replicas = 1 + updated.Spec.PatchID = 2 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(1), "1 Ephemeral runner should be up") + }) + + It("Should keep finished ephemeral runners until patch id changes", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.Phase = corev1.PodPending + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // confirm they are not deleted + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Consistently( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + 5*time.Second, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + }) + + 
It("Should handle double scale up", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated = ers.DeepCopy() + updated.Spec.Replicas = 3 + updated.Spec.PatchID = 2 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + // We should have 3 runners, and have no Succeeded ones Eventually( func() error { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return err } - for _, runner := range runnerList.Items { - if runner.Name != finishedRunner.Name { - continue - } - - if runner.Status.Phase != corev1.PodSucceeded { - return fmt.Errorf("EphemeralRunner is not finished") - } - // found pod succeeded - return nil - } - - return errors.New("Finished ephemeral runner is not found") - }, - ephemeralRunnerSetTestTimeout, - ephemeralRunnerSetTestInterval, - ).Should(Succeed(), "Finished EphemeralRunner should be deleted") - - // After one ephemeral runner is finished, simulate job done patch - patchID++ - original := new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original) - Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") - updated = original.DeepCopy() - updated.Spec.PatchID = patchID - updated.Spec.Replicas = 4 - err = k8sClient.Patch(ctx, updated, client.MergeFrom(original)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - - // Only finished ephemeral runner should be deleted - runnerList = new(actionsv1alpha1.EphemeralRunnerList) - Eventually( - func() (int, error) { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err + if 
len(runnerList.Items) != 3 { + return fmt.Errorf("Expected 3 runners, got %d", len(runnerList.Items)) } for _, runner := range runnerList.Items { if runner.Status.Phase == corev1.PodSucceeded { - return -1, fmt.Errorf("Finished EphemeralRunner should be deleted") + return fmt.Errorf("Runner %s is in Succeeded phase", runner.Name) } } - return len(runnerList.Items), nil + return nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval, - ).Should(BeEquivalentTo(4), "4 EphemeralRunner should be created") + ).Should(BeNil(), "3 EphemeralRunner should be created and none should be in Succeeded phase") + }) - // Scaling down the EphemeralRunnerSet - patchID++ - original = new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original) + It("Should handle scale down without removing pending runners", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") - updated = original.DeepCopy() - updated.Spec.PatchID = patchID - updated.Spec.Replicas = 3 - err = k8sClient.Patch(ctx, updated, client.MergeFrom(original)) + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - // Wait for the EphemeralRunnerSet to be scaled down - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(actionsv1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -409,150 +577,215 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } - // Set status to simulate a configured EphemeralRunner - refetch := false - for i, runner := range runnerList.Items { - if runner.Status.RunnerId == 0 { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - refetch = true - } - } - - if refetch { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } - } - - return len(runnerList.Items), nil - }, - ephemeralRunnerSetTestTimeout, - ephemeralRunnerSetTestInterval, - ).Should(BeEquivalentTo(3), "3 EphemeralRunner should be created") - - // We will not scale down runner that is running jobs - runningRunner := runnerList.Items[0].DeepCopy() - runningRunner.Status.JobRequestId = 1000 - err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[0])) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - - runningRunner = runnerList.Items[1].DeepCopy() - runningRunner.Status.JobRequestId = 1001 - err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1])) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - - // Scale down to 1 while 2 are running - patchID++ - original = new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original) - 
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") - updated = original.DeepCopy() - updated.Spec.PatchID = patchID - updated.Spec.Replicas = 1 - err = k8sClient.Patch(ctx, updated, client.MergeFrom(original)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - - // Wait for the EphemeralRunnerSet to be scaled down to 2 since we still have 2 runner running jobs - runnerList = new(actionsv1alpha1.EphemeralRunnerList) - Eventually( - func() (int, error) { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } - - // Set status to simulate a configured EphemeralRunner - refetch := false - for i, runner := range runnerList.Items { - if runner.Status.RunnerId == 0 { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - refetch = true - } - } - - if refetch { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } - } - return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval, ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") - // We will not scale down failed runner - failedRunner := runnerList.Items[0].DeepCopy() - failedRunner.Status.Phase = corev1.PodFailed - err = k8sClient.Status().Patch(ctx, failedRunner, client.MergeFrom(&runnerList.Items[0])) + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.Phase = corev1.PodPending + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Wait for these statuses to actually be updated runnerList = new(actionsv1alpha1.EphemeralRunnerList) Eventually( - func() (int, error) { + func() error { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { - return -1, err + return err } - - // Set status to simulate a configured EphemeralRunner - refetch := false - for i, runner := range runnerList.Items { - if runner.Status.RunnerId == 0 { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - refetch = true + pending := 0 + succeeded := 0 + for _, runner := range runnerList.Items { + switch runner.Status.Phase { + case corev1.PodSucceeded: + succeeded++ + case corev1.PodPending: + pending++ } } - if refetch { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } + if pending != 1 && succeeded != 1 { + return fmt.Errorf("Expected 1 runner in Pending and 1 in Succeeded, got %d in Pending and %d in Succeeded", pending, succeeded) } - return len(runnerList.Items), nil + return nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval, - 
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + ).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Succeeded phase") - // We will scale down to 0 when the running job is completed and the failed runner is deleted - runningRunner = runnerList.Items[1].DeepCopy() - runningRunner.Status.Phase = corev1.PodSucceeded - err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1])) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - - err = k8sClient.Delete(ctx, &runnerList.Items[0]) - Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunner") - - // Scale down to 0 while 1 ephemeral runner is failed - patchID++ - original = new(actionsv1alpha1.EphemeralRunnerSet) - err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, original) + // Scale down to 0, while 1 is still pending. This simulates the difference between the desired and actual state + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") - updated = original.DeepCopy() - updated.Spec.PatchID = patchID + + updated = ers.DeepCopy() updated.Spec.Replicas = 0 - err = k8sClient.Patch(ctx, updated, client.MergeFrom(original)) + updated.Spec.PatchID = 2 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - // Wait for the EphemeralRunnerSet to be scaled down to 0 + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + // We should have 1 runner up and pending + Eventually( + func() error { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return err + } + + if len(runnerList.Items) != 1 { + return fmt.Errorf("Expected 1 runner, got %d", len(runnerList.Items)) + } + + if runnerList.Items[0].Status.Phase != corev1.PodPending { + return fmt.Errorf("Expected runner to be in Pending, got %s", runnerList.Items[0].Status.Phase) + } + + return nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeNil(), "1 EphemeralRunner should be created and in Pending phase") + + // Now, the ephemeral runner finally is done and we can scale down to 0 + updatedRunner = runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(0), "2 EphemeralRunner should be created") + }) + + It("Should kill pending and running runners if they are up for some reason and the batch contains no jobs", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, 
client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + // Put one runner in Pending and one in Running + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodPending + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Wait for these statuses to actually be updated + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() error { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return err + } + + pending := 0 + running := 0 + + for _, runner := range runnerList.Items { + switch runner.Status.Phase { + case corev1.PodPending: + pending++ + case corev1.PodRunning: + running++ + + } + } + + if pending != 1 && running != 1 { + return fmt.Errorf("Expected 1 runner in Pending and 1 in Running, got %d in Pending and %d in Running", pending, running) + } + + return nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Running phase") + + // Scale down to 0 with patch ID 0. This forces the scale down to self correct on empty batch + + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated = ers.DeepCopy() + updated.Spec.Replicas = 0 + updated.Spec.PatchID = 0 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Consistently( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err + } + + return len(runnerList.Items), nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be up since they don't have an ID yet") + + // Now, let's say ephemeral runner controller patched these ephemeral runners with the registration. 
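+			// Setting a non-zero RunnerId marks each runner as registered with the service; only
+			// then should the PatchID == 0 scale down to zero be allowed to remove them.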
+ + updatedRunner = runnerList.Items[0].DeepCopy() + updatedRunner.Status.RunnerId = 1 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.RunnerId = 2 + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Now, eventually, they should be deleted runnerList = new(actionsv1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { @@ -561,31 +794,120 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { return -1, err } - // Set status to simulate a configured EphemeralRunner - refetch := false - for i, runner := range runnerList.Items { - if runner.Status.RunnerId == 0 { - updatedRunner := runner.DeepCopy() - updatedRunner.Status.Phase = corev1.PodRunning - updatedRunner.Status.RunnerId = i + 100 - err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner)) - Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - refetch = true - } - } + return len(runnerList.Items), nil - if refetch { - err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) - if err != nil { - return -1, err - } + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeEquivalentTo(0), "0 EphemeralRunner should exist") + }) + + It("Should replace finished ephemeral runners with new ones", func() { + ers := new(actionsv1alpha1.EphemeralRunnerSet) + err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated := ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 1 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList := new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() (int, error) { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return -1, err } return len(runnerList.Items), nil }, ephemeralRunnerSetTestTimeout, ephemeralRunnerSetTestInterval, - ).Should(BeEquivalentTo(0), "0 EphemeralRunner should be created") + ).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created") + + // Put one runner in Succeeded and one in Running + updatedRunner := runnerList.Items[0].DeepCopy() + updatedRunner.Status.Phase = corev1.PodSucceeded + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + updatedRunner = runnerList.Items[1].DeepCopy() + updatedRunner.Status.Phase = corev1.PodRunning + err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") + + // Wait for these statuses to actually be updated + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() error { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return err + } + + succeeded := 0 + running := 0 + + for _, runner := range runnerList.Items { + switch runner.Status.Phase { + case corev1.PodSucceeded: + succeeded++ + case 
corev1.PodRunning: + running++ + } + } + + if succeeded != 1 && running != 1 { + return fmt.Errorf("Expected 1 runner in Succeeded and 1 in Running, got %d in Succeeded and %d in Running", succeeded, running) + } + + return nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeNil(), "1 EphemeralRunner should be in Succeeded and 1 in Running phase") + + // Now, let's simulate replacement. The desired count is still 2. + // This simulates that we got 1 job assigned, and 1 job completed. + + ers = new(actionsv1alpha1.EphemeralRunnerSet) + err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) + Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") + + updated = ers.DeepCopy() + updated.Spec.Replicas = 2 + updated.Spec.PatchID = 2 + + err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) + Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") + + runnerList = new(actionsv1alpha1.EphemeralRunnerList) + Eventually( + func() error { + err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) + if err != nil { + return err + } + + if len(runnerList.Items) != 2 { + return fmt.Errorf("Expected 2 runners, got %d", len(runnerList.Items)) + } + + for _, runner := range runnerList.Items { + if runner.Status.Phase == corev1.PodSucceeded { + return fmt.Errorf("Expected no runners in Succeeded phase, got one") + } + } + + return nil + }, + ephemeralRunnerSetTestTimeout, + ephemeralRunnerSetTestInterval, + ).Should(BeNil(), "2 EphemeralRunner should be created and none should be in Succeeded phase") }) It("Should update status on Ephemeral Runner state changes", func() { From 8075e5ee7430fa5f2881c844fa05ff9c94da1203 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 16 Apr 2024 12:57:44 +0200 Subject: [PATCH 02/19] Refactor actions client error to include request id (#3430) Co-authored-by: Francesco Renzi --- .../ephemeralrunner_controller.go | 25 +- .../ephemeralrunner_controller_test.go | 6 +- .../ephemeralrunnerset_controller.go | 13 +- github/actions/client.go | 233 ++++++++++++++---- .../client_runner_scale_set_message_test.go | 2 +- .../client_runner_scale_set_session_test.go | 12 +- .../actions/client_runner_scale_set_test.go | 9 +- github/actions/errors.go | 91 +++++-- github/actions/errors_test.go | 206 ++++++++++++++++ github/actions/github_api_request_test.go | 8 +- 10 files changed, 509 insertions(+), 96 deletions(-) create mode 100644 github/actions/errors_test.go diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index d90f6459..8765c1a0 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "net/http" - "strings" "time" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" @@ -295,14 +294,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ } func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) { - actionsError := &actions.ActionsError{} - err := r.deleteRunnerFromService(ctx, ephemeralRunner, log) - if err != nil { - if errors.As(err, &actionsError) && - actionsError.StatusCode == http.StatusBadRequest && - 
strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { + if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil { + actionsError := &actions.ActionsError{} + if !errors.As(err, &actionsError) { + log.Error(err, "Failed to clean up runner from the service (not an ActionsError)") + return ctrl.Result{}, err + } + + if actionsError.StatusCode == http.StatusBadRequest && actionsError.IsException("JobStillRunningException") { log.Info("Runner is still running the job. Re-queue in 30 seconds") return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } log.Error(err, "Failed clean up runner from the service") @@ -310,10 +312,9 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context } log.Info("Successfully removed runner registration from service") - err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { + if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { controllerutil.RemoveFinalizer(obj, ephemeralRunnerActionsFinalizerName) - }) - if err != nil { + }); err != nil { return ctrl.Result{}, err } @@ -528,7 +529,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con } if actionsError.StatusCode != http.StatusConflict || - !strings.Contains(actionsError.ExceptionName, "AgentExistsException") { + !actionsError.IsException("AgentExistsException") { return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err) } @@ -784,7 +785,7 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte } if actionsError.StatusCode != http.StatusNotFound || - !strings.Contains(actionsError.ExceptionName, "AgentNotFoundException") { + !actionsError.IsException("AgentNotFoundException") { return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err) } diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go index 4cc6ffde..2d45d87a 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go @@ -671,8 +671,10 @@ var _ = Describe("EphemeralRunner", func() { fake.WithGetRunner( nil, &actions.ActionsError{ - StatusCode: http.StatusNotFound, - ExceptionName: "AgentNotFoundException", + StatusCode: http.StatusNotFound, + Err: &actions.ActionsExceptionError{ + ExceptionName: "AgentNotFoundException", + }, }, ), ), diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 91319de8..b462e177 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -23,7 +23,6 @@ import ( "net/http" "sort" "strconv" - "strings" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics" @@ -483,10 +482,14 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) { if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil { actionsError := &actions.ActionsError{} - if 
errors.As(err, &actionsError) && - actionsError.StatusCode == http.StatusBadRequest && - strings.Contains(actionsError.ExceptionName, "JobStillRunningException") { - // Runner is still running a job, proceed with the next one + if !errors.As(err, &actionsError) { + log.Error(err, "failed to remove runner from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId) + return false, err + } + + if actionsError.StatusCode == http.StatusBadRequest && + actionsError.IsException("JobStillRunningException") { + log.Info("Runner is still running a job, skipping deletion", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId) return false, nil } diff --git a/github/actions/client.go b/github/actions/client.go index 1afbab9c..3470384f 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -355,15 +355,22 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runne } var runnerScaleSetList *runnerScaleSetsResponse - err = json.NewDecoder(resp.Body).Decode(&runnerScaleSetList) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSetList); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } if runnerScaleSetList.Count == 0 { return nil, nil } if runnerScaleSetList.Count > 1 { - return nil, fmt.Errorf("multiple runner scale sets found with name %s", runnerScaleSetName) + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: fmt.Errorf("multiple runner scale sets found with name %q", runnerScaleSetName), + } } return &runnerScaleSetList.RunnerScaleSets[0], nil @@ -386,9 +393,12 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int } var runnerScaleSet *RunnerScaleSet - err = json.NewDecoder(resp.Body).Decode(&runnerScaleSet) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&runnerScaleSet); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return runnerScaleSet, nil } @@ -408,23 +418,43 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) ( if resp.StatusCode != http.StatusOK { body, err := io.ReadAll(resp.Body) if err != nil { - return nil, err + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } - return nil, fmt.Errorf("unexpected status code: %d - body: %s", resp.StatusCode, string(body)) + return nil, fmt.Errorf("unexpected status code: %w", &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: errors.New(string(body)), + }) } var runnerGroupList *RunnerGroupList err = json.NewDecoder(resp.Body).Decode(&runnerGroupList) if err != nil { - return nil, err + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } if runnerGroupList.Count == 0 { - return nil, fmt.Errorf("no runner group found with name '%s'", runnerGroup) + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: fmt.Errorf("no runner group found with name %q", runnerGroup), + } } if runnerGroupList.Count > 1 { - return nil, fmt.Errorf("multiple runner group found with name %s", 
runnerGroup) + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: fmt.Errorf("multiple runner group found with name %q", runnerGroup), + } } return &runnerGroupList.RunnerGroups[0], nil @@ -450,9 +480,12 @@ func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *Runne return nil, ParseActionsErrorFromResponse(resp) } var createdRunnerScaleSet *RunnerScaleSet - err = json.NewDecoder(resp.Body).Decode(&createdRunnerScaleSet) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&createdRunnerScaleSet); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return createdRunnerScaleSet, nil } @@ -480,9 +513,12 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int, } var updatedRunnerScaleSet *RunnerScaleSet - err = json.NewDecoder(resp.Body).Decode(&updatedRunnerScaleSet) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&updatedRunnerScaleSet); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return updatedRunnerScaleSet, nil } @@ -547,15 +583,26 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc body, err := io.ReadAll(resp.Body) body = trimByteOrderMark(body) if err != nil { - return nil, err + return nil, &ActionsError{ + ActivityID: resp.Header.Get(HeaderActionsActivityID), + StatusCode: resp.StatusCode, + Err: err, + } + } + return nil, &MessageQueueTokenExpiredError{ + activityID: resp.Header.Get(HeaderActionsActivityID), + statusCode: resp.StatusCode, + msg: string(body), } - return nil, &MessageQueueTokenExpiredError{msg: string(body)} } var message *RunnerScaleSetMessage - err = json.NewDecoder(resp.Body).Decode(&message) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&message); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return message, nil } @@ -591,9 +638,17 @@ func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueu body, err := io.ReadAll(resp.Body) body = trimByteOrderMark(body) if err != nil { - return err + return &ActionsError{ + ActivityID: resp.Header.Get(HeaderActionsActivityID), + StatusCode: resp.StatusCode, + Err: err, + } + } + return &MessageQueueTokenExpiredError{ + activityID: resp.Header.Get(HeaderActionsActivityID), + statusCode: resp.StatusCode, + msg: string(body), } - return &MessageQueueTokenExpiredError{msg: string(body)} } return nil } @@ -641,9 +696,18 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ } if resp.StatusCode == expectedResponseStatusCode { - if responseUnmarshalTarget != nil { - return json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget) + if responseUnmarshalTarget == nil { + return nil } + + if err := json.NewDecoder(resp.Body).Decode(responseUnmarshalTarget); err != nil { + return &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } + } + return nil } @@ -655,10 +719,18 @@ func (c *Client) doSessionRequest(ctx context.Context, method, path string, requ body, err := io.ReadAll(resp.Body) body = trimByteOrderMark(body) if err != nil { - return err + return &ActionsError{ + StatusCode: 
resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } - return fmt.Errorf("unexpected status code: %d - body: %s", resp.StatusCode, string(body)) + return fmt.Errorf("unexpected status code: %w", &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: errors.New(string(body)), + }) } func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) { @@ -692,16 +764,28 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ body, err := io.ReadAll(resp.Body) body = trimByteOrderMark(body) if err != nil { - return nil, err + return nil, &ActionsError{ + ActivityID: resp.Header.Get(HeaderActionsActivityID), + StatusCode: resp.StatusCode, + Err: err, + } } - return nil, &MessageQueueTokenExpiredError{msg: string(body)} + return nil, &MessageQueueTokenExpiredError{ + activityID: resp.Header.Get(HeaderActionsActivityID), + statusCode: resp.StatusCode, + msg: string(body), + } } var acquiredJobs *Int64List err = json.NewDecoder(resp.Body).Decode(&acquiredJobs) if err != nil { - return nil, err + return nil, &ActionsError{ + ActivityID: resp.Header.Get(HeaderActionsActivityID), + StatusCode: resp.StatusCode, + Err: err, + } } return acquiredJobs.Value, nil @@ -732,7 +816,11 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (* var acquirableJobList *AcquirableJobList err = json.NewDecoder(resp.Body).Decode(&acquirableJobList) if err != nil { - return nil, err + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return acquirableJobList, nil @@ -761,9 +849,12 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting * } var runnerJitConfig *RunnerScaleSetJitRunnerConfig - err = json.NewDecoder(resp.Body).Decode(&runnerJitConfig) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&runnerJitConfig); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return runnerJitConfig, nil } @@ -786,9 +877,12 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc } var runnerReference *RunnerReference - err = json.NewDecoder(resp.Body).Decode(&runnerReference) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&runnerReference); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } return runnerReference, nil @@ -812,9 +906,12 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne } var runnerList *RunnerReferenceList - err = json.NewDecoder(resp.Body).Decode(&runnerList) - if err != nil { - return nil, err + if err := json.NewDecoder(resp.Body).Decode(&runnerList); err != nil { + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: err, + } } if runnerList.Count == 0 { @@ -822,7 +919,11 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne } if runnerList.Count > 1 { - return nil, fmt.Errorf("multiple runner found with name %s", runnerName) + return nil, &ActionsError{ + StatusCode: resp.StatusCode, + ActivityID: resp.Header.Get(HeaderActionsActivityID), + Err: fmt.Errorf("multiple runner 
found with name %s", runnerName), + } } return &runnerList.RunnerReferences[0], nil @@ -895,12 +996,20 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationT if err != nil { return nil, err } - return nil, fmt.Errorf("unexpected response from Actions service during registration token call: %v - %v", resp.StatusCode, string(body)) + return nil, &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: errors.New(string(body)), + } } var registrationToken *registrationToken if err := json.NewDecoder(resp.Body).Decode(®istrationToken); err != nil { - return nil, err + return nil, &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: err, + } } return registrationToken, nil @@ -937,8 +1046,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c // Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app var accessToken *accessToken - err = json.NewDecoder(resp.Body).Decode(&accessToken) - return accessToken, err + if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil { + return nil, &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: err, + } + } + return accessToken, nil } type ActionsServiceAdminConnection struct { @@ -989,21 +1104,29 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis break } - errStr := fmt.Sprintf("unexpected response from Actions service during registration call: %v", resp.StatusCode) + var innerErr error body, err := io.ReadAll(resp.Body) if err != nil { - err = fmt.Errorf("%s - %w", errStr, err) + innerErr = err } else { - err = fmt.Errorf("%s - %v", errStr, string(body)) + innerErr = errors.New(string(body)) } if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { - return nil, err + return nil, &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: innerErr, + } } retry++ if retry > 3 { - return nil, fmt.Errorf("unable to register runner after 3 retries: %v", err) + return nil, fmt.Errorf("unable to register runner after 3 retries: %w", &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: innerErr, + }) } time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1))) @@ -1011,7 +1134,11 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis var actionsServiceAdminConnection *ActionsServiceAdminConnection if err := json.NewDecoder(resp.Body).Decode(&actionsServiceAdminConnection); err != nil { - return nil, err + return nil, &GitHubAPIError{ + StatusCode: resp.StatusCode, + RequestID: resp.Header.Get(HeaderGitHubRequestID), + Err: err, + } } return actionsServiceAdminConnection, nil diff --git a/github/actions/client_runner_scale_set_message_test.go b/github/actions/client_runner_scale_set_message_test.go index 0de77094..cb67c310 100644 --- a/github/actions/client_runner_scale_set_message_test.go +++ b/github/actions/client_runner_scale_set_message_test.go @@ -98,7 +98,7 @@ func TestGetMessage(t *testing.T) { t.Run("Status code not found", func(t *testing.T) { want := actions.ActionsError{ - Message: "Request returned status: 404 Not Found", + Err: errors.New("unknown exception"), StatusCode: 404, } server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { 
diff --git a/github/actions/client_runner_scale_set_session_test.go b/github/actions/client_runner_scale_set_session_test.go index 7b2ab69d..fff1b9f0 100644 --- a/github/actions/client_runner_scale_set_session_test.go +++ b/github/actions/client_runner_scale_set_session_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/require" ) +const exampleRequestID = "5ddf2050-dae0-013c-9159-04421ad31b68" + func TestCreateMessageSession(t *testing.T) { ctx := context.Background() auth := &actions.ActionsAuth{ @@ -69,13 +71,17 @@ func TestCreateMessageSession(t *testing.T) { } want := &actions.ActionsError{ - ExceptionName: "CSharpExceptionNameHere", - Message: "could not do something", - StatusCode: http.StatusBadRequest, + ActivityID: exampleRequestID, + StatusCode: http.StatusBadRequest, + Err: &actions.ActionsExceptionError{ + ExceptionName: "CSharpExceptionNameHere", + Message: "could not do something", + }, } server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") + w.Header().Set(actions.HeaderActionsActivityID, exampleRequestID) w.WriteHeader(http.StatusBadRequest) resp := []byte(`{"typeName": "CSharpExceptionNameHere","message": "could not do something"}`) w.Write(resp) diff --git a/github/actions/client_runner_scale_set_test.go b/github/actions/client_runner_scale_set_test.go index 7f74190e..bb48ae5f 100644 --- a/github/actions/client_runner_scale_set_test.go +++ b/github/actions/client_runner_scale_set_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/actions/actions-runner-controller/github/actions" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -124,9 +125,15 @@ func TestGetRunnerScaleSet(t *testing.T) { }) t.Run("Multiple runner scale sets found", func(t *testing.T) { - wantErr := fmt.Errorf("multiple runner scale sets found with name %s", scaleSetName) + reqID := uuid.NewString() + wantErr := &actions.ActionsError{ + StatusCode: http.StatusOK, + ActivityID: reqID, + Err: fmt.Errorf("multiple runner scale sets found with name %q", scaleSetName), + } runnerScaleSetsResp := []byte(`{"count":2,"value":[{"id":1,"name":"ScaleSet"}]}`) server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.Header().Set(actions.HeaderActionsActivityID, reqID) w.Write(runnerScaleSetsResp) })) diff --git a/github/actions/errors.go b/github/actions/errors.go index 7f483a0a..736520bd 100644 --- a/github/actions/errors.go +++ b/github/actions/errors.go @@ -2,63 +2,118 @@ package actions import ( "encoding/json" + "errors" "fmt" "io" "net/http" "strings" ) +// Header names for request IDs +const ( + HeaderActionsActivityID = "ActivityId" + HeaderGitHubRequestID = "X-GitHub-Request-Id" +) + +type GitHubAPIError struct { + StatusCode int + RequestID string + Err error +} + +func (e *GitHubAPIError) Error() string { + return fmt.Sprintf("github api error: StatusCode %d, RequestID %q: %v", e.StatusCode, e.RequestID, e.Err) +} + +func (e *GitHubAPIError) Unwrap() error { + return e.Err +} + type ActionsError struct { - ExceptionName string `json:"typeName,omitempty"` - Message string `json:"message,omitempty"` - StatusCode int + ActivityID string + StatusCode int + Err error } func (e *ActionsError) Error() string { - return fmt.Sprintf("%v - had issue communicating with Actions backend: %v", e.StatusCode, e.Message) + return fmt.Sprintf("actions error: StatusCode %d, AcivityId %q: %v", e.StatusCode, e.ActivityID, 
e.Err) +} + +func (e *ActionsError) Unwrap() error { + return e.Err +} + +func (e *ActionsError) IsException(target string) bool { + if ex, ok := e.Err.(*ActionsExceptionError); ok { + return strings.Contains(ex.ExceptionName, target) + } + + return false +} + +type ActionsExceptionError struct { + ExceptionName string `json:"typeName,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *ActionsExceptionError) Error() string { + return fmt.Sprintf("%s: %s", e.ExceptionName, e.Message) } func ParseActionsErrorFromResponse(response *http.Response) error { if response.ContentLength == 0 { - message := "Request returned status: " + response.Status return &ActionsError{ - ExceptionName: "unknown", - Message: message, - StatusCode: response.StatusCode, + ActivityID: response.Header.Get(HeaderActionsActivityID), + StatusCode: response.StatusCode, + Err: errors.New("unknown exception"), } } defer response.Body.Close() body, err := io.ReadAll(response.Body) if err != nil { - return err + return &ActionsError{ + ActivityID: response.Header.Get(HeaderActionsActivityID), + StatusCode: response.StatusCode, + Err: err, + } } body = trimByteOrderMark(body) contentType, ok := response.Header["Content-Type"] if ok && len(contentType) > 0 && strings.Contains(contentType[0], "text/plain") { message := string(body) - statusCode := response.StatusCode return &ActionsError{ - Message: message, - StatusCode: statusCode, + ActivityID: response.Header.Get(HeaderActionsActivityID), + StatusCode: response.StatusCode, + Err: errors.New(message), } } - actionsError := &ActionsError{StatusCode: response.StatusCode} - if err := json.Unmarshal(body, &actionsError); err != nil { - return err + var exception ActionsExceptionError + if err := json.Unmarshal(body, &exception); err != nil { + return &ActionsError{ + ActivityID: response.Header.Get(HeaderActionsActivityID), + StatusCode: response.StatusCode, + Err: err, + } } - return actionsError + return &ActionsError{ + ActivityID: response.Header.Get(HeaderActionsActivityID), + StatusCode: response.StatusCode, + Err: &exception, + } } type MessageQueueTokenExpiredError struct { - msg string + activityID string + statusCode int + msg string } func (e *MessageQueueTokenExpiredError) Error() string { - return e.msg + return fmt.Sprintf("MessageQueueTokenExpiredError: AcivityId %q, StatusCode %d: %s", e.activityID, e.statusCode, e.msg) } type HttpClientSideError struct { diff --git a/github/actions/errors_test.go b/github/actions/errors_test.go new file mode 100644 index 00000000..46310ba1 --- /dev/null +++ b/github/actions/errors_test.go @@ -0,0 +1,206 @@ +package actions_test + +import ( + "errors" + "io" + "net/http" + "strings" + "testing" + + "github.com/actions/actions-runner-controller/github/actions" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestActionsError(t *testing.T) { + t.Run("contains the status code, activity ID, and error", func(t *testing.T) { + err := &actions.ActionsError{ + ActivityID: "activity-id", + StatusCode: 404, + Err: errors.New("example error description"), + } + + s := err.Error() + assert.Contains(t, s, "StatusCode 404") + assert.Contains(t, s, "AcivityId \"activity-id\"") + assert.Contains(t, s, "example error description") + }) + + t.Run("unwraps the error", func(t *testing.T) { + err := &actions.ActionsError{ + ActivityID: "activity-id", + StatusCode: 404, + Err: &actions.ActionsExceptionError{ + ExceptionName: "exception-name", + Message: "example error message", + }, + } + + 
assert.Equal(t, err.Unwrap(), err.Err) + }) + + t.Run("is exception is ok", func(t *testing.T) { + err := &actions.ActionsError{ + ActivityID: "activity-id", + StatusCode: 404, + Err: &actions.ActionsExceptionError{ + ExceptionName: "exception-name", + Message: "example error message", + }, + } + + var exception *actions.ActionsExceptionError + assert.True(t, errors.As(err, &exception)) + + assert.True(t, err.IsException("exception-name")) + }) + + t.Run("is exception is not ok", func(t *testing.T) { + tt := map[string]*actions.ActionsError{ + "not an exception": { + ActivityID: "activity-id", + StatusCode: 404, + Err: errors.New("example error description"), + }, + "not target exception": { + ActivityID: "activity-id", + StatusCode: 404, + Err: &actions.ActionsExceptionError{ + ExceptionName: "exception-name", + Message: "example error message", + }, + }, + } + + targetException := "target-exception" + for name, err := range tt { + t.Run(name, func(t *testing.T) { + assert.False(t, err.IsException(targetException)) + }) + } + }) +} + +func TestActionsExceptionError(t *testing.T) { + t.Run("contains the exception name and message", func(t *testing.T) { + err := &actions.ActionsExceptionError{ + ExceptionName: "exception-name", + Message: "example error message", + } + + s := err.Error() + assert.Contains(t, s, "exception-name") + assert.Contains(t, s, "example error message") + }) +} + +func TestGitHubAPIError(t *testing.T) { + t.Run("contains the status code, request ID, and error", func(t *testing.T) { + err := &actions.GitHubAPIError{ + StatusCode: 404, + RequestID: "request-id", + Err: errors.New("example error description"), + } + + s := err.Error() + assert.Contains(t, s, "StatusCode 404") + assert.Contains(t, s, "RequestID \"request-id\"") + assert.Contains(t, s, "example error description") + }) + + t.Run("unwraps the error", func(t *testing.T) { + err := &actions.GitHubAPIError{ + StatusCode: 404, + RequestID: "request-id", + Err: errors.New("example error description"), + } + + assert.Equal(t, err.Unwrap(), err.Err) + }) +} + +func ParseActionsErrorFromResponse(t *testing.T) { + t.Run("empty content length", func(t *testing.T) { + response := &http.Response{ + ContentLength: 0, + Header: http.Header{ + actions.HeaderActionsActivityID: []string{"activity-id"}, + }, + StatusCode: 404, + } + + err := actions.ParseActionsErrorFromResponse(response) + require.Error(t, err) + assert.Equal(t, err.(*actions.ActionsError).ActivityID, "activity-id") + assert.Equal(t, err.(*actions.ActionsError).StatusCode, 404) + assert.Equal(t, err.(*actions.ActionsError).Err.Error(), "unknown exception") + }) + + t.Run("contains text plain error", func(t *testing.T) { + errorMessage := "example error message" + response := &http.Response{ + ContentLength: int64(len(errorMessage)), + Header: http.Header{ + actions.HeaderActionsActivityID: []string{"activity-id"}, + "Content-Type": []string{"text/plain"}, + }, + StatusCode: 404, + Body: io.NopCloser(strings.NewReader(errorMessage)), + } + + err := actions.ParseActionsErrorFromResponse(response) + require.Error(t, err) + var actionsError *actions.ActionsError + assert.ErrorAs(t, err, &actionsError) + assert.Equal(t, actionsError.ActivityID, "activity-id") + assert.Equal(t, actionsError.StatusCode, 404) + assert.Equal(t, actionsError.Err.Error(), errorMessage) + }) + + t.Run("contains json error", func(t *testing.T) { + errorMessage := `{"typeName":"exception-name","message":"example error message"}` + response := &http.Response{ + ContentLength: 
int64(len(errorMessage)), + Header: http.Header{ + actions.HeaderActionsActivityID: []string{"activity-id"}, + "Content-Type": []string{"application/json"}, + }, + StatusCode: 404, + Body: io.NopCloser(strings.NewReader(errorMessage)), + } + + err := actions.ParseActionsErrorFromResponse(response) + require.Error(t, err) + var actionsError *actions.ActionsError + assert.ErrorAs(t, err, &actionsError) + assert.Equal(t, actionsError.ActivityID, "activity-id") + assert.Equal(t, actionsError.StatusCode, 404) + + inner, ok := actionsError.Err.(*actions.ActionsExceptionError) + require.True(t, ok) + assert.Equal(t, inner.ExceptionName, "exception-name") + assert.Equal(t, inner.Message, "example error message") + }) + + t.Run("wrapped exception error", func(t *testing.T) { + errorMessage := `{"typeName":"exception-name","message":"example error message"}` + response := &http.Response{ + ContentLength: int64(len(errorMessage)), + Header: http.Header{ + actions.HeaderActionsActivityID: []string{"activity-id"}, + "Content-Type": []string{"application/json"}, + }, + StatusCode: 404, + Body: io.NopCloser(strings.NewReader(errorMessage)), + } + + err := actions.ParseActionsErrorFromResponse(response) + require.Error(t, err) + + var actionsExceptionError *actions.ActionsExceptionError + assert.ErrorAs(t, err, &actionsExceptionError) + + assert.Equal(t, actionsExceptionError.ExceptionName, "exception-name") + assert.Equal(t, actionsExceptionError.Message, "example error message") + }) +} diff --git a/github/actions/github_api_request_test.go b/github/actions/github_api_request_test.go index 78f740ec..18998cdd 100644 --- a/github/actions/github_api_request_test.go +++ b/github/actions/github_api_request_test.go @@ -139,7 +139,13 @@ func TestNewActionsServiceRequest(t *testing.T) { w.WriteHeader(http.StatusUnauthorized) w.Write([]byte(errMessage)) } - server := testserver.New(t, nil, testserver.WithActionsToken("random-token"), testserver.WithActionsToken(newToken), testserver.WithActionsRegistrationTokenHandler(unauthorizedHandler)) + server := testserver.New( + t, + nil, + testserver.WithActionsToken("random-token"), + testserver.WithActionsToken(newToken), + testserver.WithActionsRegistrationTokenHandler(unauthorizedHandler), + ) client, err := actions.NewClient(server.ConfigURLForOrg("my-org"), defaultCreds) require.NoError(t, err) expiringToken := "expiring-token" From 4ee49fee144793ea507e82ce0def717d2f1698b6 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 16 Apr 2024 14:00:40 +0200 Subject: [PATCH 03/19] Propagate max capacity information to the actions back-end (#3431) --- cmd/ghalistener/listener/listener.go | 16 ++++---- cmd/ghalistener/listener/listener_test.go | 17 +++++--- cmd/ghalistener/listener/mocks/client.go | 18 ++++----- .../autoScalerMessageListener.go | 4 +- .../autoScalerMessageListener_test.go | 38 +++++++++--------- .../autoScalerService.go | 2 +- .../autoScalerService_test.go | 14 +++---- .../messageListener.go | 2 +- .../mock_RunnerScaleSetClient.go | 10 ++--- .../sessionrefreshingclient.go | 10 +++-- .../sessionrefreshingclient_test.go | 22 +++++------ github/actions/client.go | 14 ++++++- .../client_runner_scale_set_message_test.go | 39 ++++++++++++++++--- github/actions/errors.go | 1 - github/actions/fake/client.go | 2 +- github/actions/mock_ActionsService.go | 18 ++++----- github/actions/mock_SessionService.go | 18 ++++----- github/actions/sessionservice.go | 2 +- 18 files changed, 147 insertions(+), 100 deletions(-) diff --git a/cmd/ghalistener/listener/listener.go 
b/cmd/ghalistener/listener/listener.go index 56da0a8f..f433cbb9 100644 --- a/cmd/ghalistener/listener/listener.go +++ b/cmd/ghalistener/listener/listener.go @@ -31,7 +31,7 @@ const ( type Client interface { GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) - GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) + GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) @@ -80,6 +80,7 @@ type Listener struct { // updated fields lastMessageID int64 // The ID of the last processed message. + maxCapacity int // The maximum number of runners that can be created. session *actions.RunnerScaleSetSession // The session for managing the runner scale set. } @@ -89,10 +90,11 @@ func New(config Config) (*Listener, error) { } listener := &Listener{ - scaleSetID: config.ScaleSetID, - client: config.Client, - logger: config.Logger, - metrics: metrics.Discard, + scaleSetID: config.ScaleSetID, + client: config.Client, + logger: config.Logger, + metrics: metrics.Discard, + maxCapacity: config.MaxRunners, } if config.Metrics != nil { @@ -267,7 +269,7 @@ func (l *Listener) createSession(ctx context.Context) error { func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) { l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID) - msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID) + msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity) if err == nil { // if NO error return msg, nil } @@ -283,7 +285,7 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID) - msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID) + msg, err = l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity) if err != nil { // if NO error return nil, fmt.Errorf("failed to get next message after message session refresh: %w", err) } diff --git a/cmd/ghalistener/listener/listener_test.go b/cmd/ghalistener/listener/listener_test.go index 610abc40..c39a0773 100644 --- a/cmd/ghalistener/listener/listener_test.go +++ b/cmd/ghalistener/listener/listener_test.go @@ -123,13 +123,14 @@ func TestListener_getMessage(t *testing.T) { config := Config{ ScaleSetID: 1, Metrics: metrics.Discard, + MaxRunners: 10, } client := listenermocks.NewClient(t) want := &actions.RunnerScaleSetMessage{ MessageId: 1, } - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once() + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once() config.Client = client l, err := New(config) 
@@ -148,10 +149,11 @@ func TestListener_getMessage(t *testing.T) { config := Config{ ScaleSetID: 1, Metrics: metrics.Discard, + MaxRunners: 10, } client := listenermocks.NewClient(t) - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once() + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.HttpClientSideError{Code: http.StatusNotFound}).Once() config.Client = client l, err := New(config) @@ -170,6 +172,7 @@ func TestListener_getMessage(t *testing.T) { config := Config{ ScaleSetID: 1, Metrics: metrics.Discard, + MaxRunners: 10, } client := listenermocks.NewClient(t) @@ -185,12 +188,12 @@ func TestListener_getMessage(t *testing.T) { } client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() want := &actions.RunnerScaleSetMessage{ MessageId: 1, } - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(want, nil).Once() + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(want, nil).Once() config.Client = client @@ -214,6 +217,7 @@ func TestListener_getMessage(t *testing.T) { config := Config{ ScaleSetID: 1, Metrics: metrics.Discard, + MaxRunners: 10, } client := listenermocks.NewClient(t) @@ -229,7 +233,7 @@ func TestListener_getMessage(t *testing.T) { } client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice() + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Twice() config.Client = client @@ -450,6 +454,7 @@ func TestListener_Listen(t *testing.T) { config := Config{ ScaleSetID: 1, Metrics: metrics.Discard, + MaxRunners: 10, } client := listenermocks.NewClient(t) @@ -470,7 +475,7 @@ func TestListener_Listen(t *testing.T) { MessageType: "RunnerScaleSetJobMessages", Statistics: &actions.RunnerScaleSetStatistic{}, } - client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything). + client.On("GetMessage", ctx, mock.Anything, mock.Anything, mock.Anything, 10). Return(msg, nil). 
Run( func(mock.Arguments) { diff --git a/cmd/ghalistener/listener/mocks/client.go b/cmd/ghalistener/listener/mocks/client.go index 9c3d38fd..a36c9344 100644 --- a/cmd/ghalistener/listener/mocks/client.go +++ b/cmd/ghalistener/listener/mocks/client.go @@ -123,25 +123,25 @@ func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) ( return r0, r1 } -// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId -func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) { - ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) +// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity +func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) { + ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) var r0 *actions.RunnerScaleSetMessage var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*actions.RunnerScaleSetMessage, error)); ok { - return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*actions.RunnerScaleSetMessage, error)); ok { + return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *actions.RunnerScaleSetMessage); ok { - r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *actions.RunnerScaleSetMessage); ok { + r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*actions.RunnerScaleSetMessage) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok { - r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok { + r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } else { r1 = ret.Error(1) } diff --git a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go index 0d7f5a2b..26c5072d 100644 --- a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go +++ b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener.go @@ -129,7 +129,7 @@ func (m *AutoScalerClient) Close() error { return m.client.Close() } -func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error { +func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error { if m.initialMessage != nil { err := handler(m.initialMessage) if err != nil { @@ -141,7 +141,7 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler } for { - message, err := m.client.GetMessage(ctx, m.lastMessageId) + message, err := m.client.GetMessage(ctx, m.lastMessageId, maxCapacity) if err != nil { return fmt.Errorf("get message failed from refreshing client. 
%w", err) } diff --git a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go index 2d6ef711..c48a9a54 100644 --- a/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go +++ b/cmd/githubrunnerscalesetlistener/autoScalerMessageListener_test.go @@ -317,7 +317,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) { Statistics: &actions.RunnerScaleSetStatistic{}, } mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{ MessageId: 1, MessageType: "test", Body: "test", @@ -332,7 +332,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting message") assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message") @@ -340,7 +340,7 @@ func TestGetRunnerScaleSetMessage(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting message") assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") @@ -368,7 +368,7 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) { Statistics: &actions.RunnerScaleSetStatistic{}, } mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{ MessageId: 1, MessageType: "test", Body: "test", @@ -383,14 +383,14 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting message") err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return fmt.Errorf("error") - }) + }, 10) assert.ErrorContains(t, err, "handle message failed. 
error", "Error getting message") assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated") @@ -419,7 +419,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) { TotalAssignedJobs: 2, }, } - mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) + mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything, mock.Anything).Return(session, nil) mockActionsClient.On("GetAcquirableJobs", ctx, 1).Return(&actions.AcquirableJobList{ Count: 1, Jobs: []actions.AcquirableJob{ @@ -439,7 +439,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessage(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting message") assert.Nil(t, asClient.initialMessage, "Initial message should be nil") @@ -488,7 +488,7 @@ func TestGetRunnerScaleSetMessage_HandleInitialMessageFailed(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return fmt.Errorf("error") - }) + }, 10) assert.ErrorContains(t, err, "fail to process initial message. error", "Error getting message") assert.NotNil(t, asClient.initialMessage, "Initial message should be nil") @@ -516,8 +516,8 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) { Statistics: &actions.RunnerScaleSetStatistic{}, } mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, nil).Times(3) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, nil).Times(3) + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{ MessageId: 1, MessageType: "test", Body: "test", @@ -532,13 +532,13 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting initial message") err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting message") assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") @@ -565,7 +565,7 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) { Statistics: &actions.RunnerScaleSetStatistic{}, } mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(nil, fmt.Errorf("error")) + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(nil, fmt.Errorf("error")) asClient, err := NewAutoScalerClient(ctx, mockActionsClient, &logger, 1, func(asc *AutoScalerClient) { asc.client = mockSessionClient @@ -575,12 +575,12 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) { // process 
initial message err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { return nil - }) + }, 10) assert.NoError(t, err, "Error getting initial message") err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { return fmt.Errorf("Should not be called") - }) + }, 10) assert.ErrorContains(t, err, "get message failed from refreshing client. error", "Error should be returned") assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated") @@ -608,7 +608,7 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) { Statistics: &actions.RunnerScaleSetStatistic{}, } mockActionsClient.On("CreateMessageSession", ctx, 1, mock.Anything).Return(session, nil) - mockSessionClient.On("GetMessage", ctx, int64(0)).Return(&actions.RunnerScaleSetMessage{ + mockSessionClient.On("GetMessage", ctx, int64(0), mock.Anything).Return(&actions.RunnerScaleSetMessage{ MessageId: 1, MessageType: "test", Body: "test", @@ -623,13 +623,13 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) { err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.NoError(t, err, "Error getting initial message") err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error { logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body) return nil - }) + }, 10) assert.ErrorContains(t, err, "delete message failed from refreshing client. error", "Error getting message") assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated") diff --git a/cmd/githubrunnerscalesetlistener/autoScalerService.go b/cmd/githubrunnerscalesetlistener/autoScalerService.go index b8e14521..c3097212 100644 --- a/cmd/githubrunnerscalesetlistener/autoScalerService.go +++ b/cmd/githubrunnerscalesetlistener/autoScalerService.go @@ -89,7 +89,7 @@ func (s *Service) Start() error { s.logger.Info("service is stopped.") return nil default: - err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage) + err := s.rsClient.GetRunnerScaleSetMessage(s.ctx, s.processMessage, s.settings.MaxRunners) if err != nil { return fmt.Errorf("could not get and process message. 
%w", err) } diff --git a/cmd/githubrunnerscalesetlistener/autoScalerService_test.go b/cmd/githubrunnerscalesetlistener/autoScalerService_test.go index d0e54545..9a353d16 100644 --- a/cmd/githubrunnerscalesetlistener/autoScalerService_test.go +++ b/cmd/githubrunnerscalesetlistener/autoScalerService_test.go @@ -64,7 +64,7 @@ func TestStart(t *testing.T) { ) require.NoError(t, err) - mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(mock.Arguments) { cancel() }).Return(nil).Once() err = service.Start() @@ -98,7 +98,7 @@ func TestStart_ScaleToMinRunners(t *testing.T) { ) require.NoError(t, err) - mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) { + mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { _ = service.scaleForAssignedJobCount(5) }).Return(nil) @@ -137,7 +137,7 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) { require.NoError(t, err) c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once() - mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) { + mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { _ = service.scaleForAssignedJobCount(5) }).Return(c.ReturnArguments.Get(0)) @@ -172,8 +172,8 @@ func TestStart_GetMultipleMessages(t *testing.T) { ) require.NoError(t, err) - mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5) - mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(5) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once() err = service.Start() @@ -207,8 +207,8 @@ func TestStart_ErrorOnMessage(t *testing.T) { ) require.NoError(t, err) - mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2) - mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once() + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(nil).Times(2) + mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything, mock.Anything).Return(fmt.Errorf("error")).Once() err = service.Start() diff --git a/cmd/githubrunnerscalesetlistener/messageListener.go b/cmd/githubrunnerscalesetlistener/messageListener.go index 0f01db58..e90aa454 100644 --- a/cmd/githubrunnerscalesetlistener/messageListener.go +++ b/cmd/githubrunnerscalesetlistener/messageListener.go @@ -8,6 +8,6 @@ import ( //go:generate mockery --inpackage --name=RunnerScaleSetClient type RunnerScaleSetClient interface { - GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error) error + GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error } diff --git a/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go 
b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go index 80ba900a..a6f6a5d1 100644 --- a/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go +++ b/cmd/githubrunnerscalesetlistener/mock_RunnerScaleSetClient.go @@ -29,13 +29,13 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con return r0 } -// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler -func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error) error { - ret := _m.Called(ctx, handler) +// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity +func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error { + ret := _m.Called(ctx, handler, maxCapacity) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error) error); ok { - r0 = rf(ctx, handler) + if rf, ok := ret.Get(0).(func(context.Context, func(*actions.RunnerScaleSetMessage) error, int) error); ok { + r0 = rf(ctx, handler, maxCapacity) } else { r0 = ret.Error(0) } diff --git a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go index 11df7e21..f3262c15 100644 --- a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go +++ b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient.go @@ -24,8 +24,12 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio } } -func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) { - message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId) +func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) { + if maxCapacity < 0 { + return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0") + } + + message, err := m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity) if err == nil { return message, nil } @@ -42,7 +46,7 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId } m.session = session - message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId) + message, err = m.client.GetMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, lastMessageId, maxCapacity) if err != nil { return nil, fmt.Errorf("delete message failed after refresh message session. 
%w", err) } diff --git a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go index 1423a0ce..1cdfb6c7 100644 --- a/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go +++ b/cmd/githubrunnerscalesetlistener/sessionrefreshingclient_test.go @@ -31,17 +31,17 @@ func TestGetMessage(t *testing.T) { }, } - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, nil).Once() - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, nil).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(&actions.RunnerScaleSetMessage{MessageId: 1}, nil).Once() client := newSessionClient(mockActionsClient, &logger, session) - msg, err := client.GetMessage(ctx, 0) + msg, err := client.GetMessage(ctx, 0, 10) require.NoError(t, err, "GetMessage should not return an error") assert.Nil(t, msg, "GetMessage should return nil message") - msg, err = client.GetMessage(ctx, 0) + msg, err = client.GetMessage(ctx, 0, 10) require.NoError(t, err, "GetMessage should not return an error") assert.Equal(t, int64(1), msg.MessageId, "GetMessage should return a message with id 1") @@ -146,11 +146,11 @@ func TestGetMessage_Error(t *testing.T) { }, } - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, fmt.Errorf("error")).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, fmt.Errorf("error")).Once() client := newSessionClient(mockActionsClient, &logger, session) - msg, err := client.GetMessage(ctx, 0) + msg, err := client.GetMessage(ctx, 0, 10) assert.ErrorContains(t, err, "get message failed. 
error", "GetMessage should return an error") assert.Nil(t, msg, "GetMessage should return nil message") assert.True(t, mockActionsClient.AssertExpectations(t), "All expected calls to mockActionsClient should have been made") @@ -227,8 +227,8 @@ func TestGetMessage_RefreshToken(t *testing.T) { Id: 1, }, } - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0)).Return(&actions.RunnerScaleSetMessage{ + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, "token2", int64(0), 10).Return(&actions.RunnerScaleSetMessage{ MessageId: 1, MessageType: "test", Body: "test", @@ -243,7 +243,7 @@ func TestGetMessage_RefreshToken(t *testing.T) { }, nil).Once() client := newSessionClient(mockActionsClient, &logger, session) - msg, err := client.GetMessage(ctx, 0) + msg, err := client.GetMessage(ctx, 0, 10) assert.NoError(t, err, "Error getting message") assert.Equal(t, int64(1), msg.MessageId, "message id should be updated") assert.Equal(t, "token2", client.session.MessageQueueAccessToken, "Message queue access token should be updated") @@ -340,11 +340,11 @@ func TestGetMessage_RefreshToken_Failed(t *testing.T) { Id: 1, }, } - mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0)).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() + mockActionsClient.On("GetMessage", ctx, session.MessageQueueUrl, session.MessageQueueAccessToken, int64(0), 10).Return(nil, &actions.MessageQueueTokenExpiredError{}).Once() mockActionsClient.On("RefreshMessageSession", ctx, session.RunnerScaleSet.Id, session.SessionId).Return(nil, fmt.Errorf("error")) client := newSessionClient(mockActionsClient, &logger, session) - msg, err := client.GetMessage(ctx, 0) + msg, err := client.GetMessage(ctx, 0, 10) assert.ErrorContains(t, err, "refresh message session failed. 
error", "Error should be returned") assert.Nil(t, msg, "Message should be nil") assert.Equal(t, "token", client.session.MessageQueueAccessToken, "Message queue access token should not be updated") diff --git a/github/actions/client.go b/github/actions/client.go index 3470384f..18a078cf 100644 --- a/github/actions/client.go +++ b/github/actions/client.go @@ -29,6 +29,9 @@ const ( apiVersionQueryParam = "api-version=6.0-preview" ) +// Header used to propagate capacity information to the back-end +const HeaderScaleSetMaxCapacity = "X-ScaleSetMaxCapacity" + //go:generate mockery --inpackage --name=ActionsService type ActionsService interface { GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runnerScaleSetName string) (*RunnerScaleSet, error) @@ -45,7 +48,7 @@ type ActionsService interface { AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*AcquirableJobList, error) - GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) + GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *RunnerScaleSetJitRunnerSetting, scaleSetId int) (*RunnerScaleSetJitRunnerConfig, error) @@ -104,6 +107,8 @@ type Client struct { proxyFunc ProxyFunc } +var _ ActionsService = &Client{} + type ProxyFunc func(req *http.Request) (*url.URL, error) type ClientOption func(*Client) @@ -543,7 +548,7 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int) return nil } -func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) { +func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) { u, err := url.Parse(messageQueueUrl) if err != nil { return nil, err @@ -555,6 +560,10 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc u.RawQuery = q.Encode() } + if maxCapacity < 0 { + return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0") + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) if err != nil { return nil, err @@ -563,6 +572,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc req.Header.Set("Accept", "application/json; api-version=6.0-preview") req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", messageQueueAccessToken)) req.Header.Set("User-Agent", c.userAgent.String()) + req.Header.Set(HeaderScaleSetMaxCapacity, strconv.Itoa(maxCapacity)) resp, err := c.Do(req) if err != nil { diff --git a/github/actions/client_runner_scale_set_message_test.go b/github/actions/client_runner_scale_set_message_test.go index cb67c310..8b15a835 100644 --- a/github/actions/client_runner_scale_set_message_test.go +++ b/github/actions/client_runner_scale_set_message_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "net/http" + "strconv" "testing" "time" @@ -35,7 +36,7 @@ func TestGetMessage(t *testing.T) { client, err := actions.NewClient(s.configURLForOrg("my-org"), auth) require.NoError(t, err) - got, err := 
client.GetMessage(ctx, s.URL, token, 0) + got, err := client.GetMessage(ctx, s.URL, token, 0, 10) require.NoError(t, err) assert.Equal(t, want, got) }) @@ -52,7 +53,7 @@ func TestGetMessage(t *testing.T) { client, err := actions.NewClient(s.configURLForOrg("my-org"), auth) require.NoError(t, err) - got, err := client.GetMessage(ctx, s.URL, token, 1) + got, err := client.GetMessage(ctx, s.URL, token, 1, 10) require.NoError(t, err) assert.Equal(t, want, got) }) @@ -76,7 +77,7 @@ func TestGetMessage(t *testing.T) { ) require.NoError(t, err) - _, err = client.GetMessage(ctx, server.URL, token, 0) + _, err = client.GetMessage(ctx, server.URL, token, 0, 10) assert.NotNil(t, err) assert.Equalf(t, actualRetry, expectedRetry, "A retry was expected after the first request but got: %v", actualRetry) }) @@ -89,7 +90,7 @@ func TestGetMessage(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetMessage(ctx, server.URL, token, 0) + _, err = client.GetMessage(ctx, server.URL, token, 0, 10) require.NotNil(t, err) var expectedErr *actions.MessageQueueTokenExpiredError @@ -108,7 +109,7 @@ func TestGetMessage(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetMessage(ctx, server.URL, token, 0) + _, err = client.GetMessage(ctx, server.URL, token, 0, 10) require.NotNil(t, err) assert.Equal(t, want.Error(), err.Error()) }) @@ -122,9 +123,35 @@ func TestGetMessage(t *testing.T) { client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) require.NoError(t, err) - _, err = client.GetMessage(ctx, server.URL, token, 0) + _, err = client.GetMessage(ctx, server.URL, token, 0, 10) assert.NotNil(t, err) }) + + t.Run("Capacity error handling", func(t *testing.T) { + server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + hc := r.Header.Get(actions.HeaderScaleSetMaxCapacity) + c, err := strconv.Atoi(hc) + require.NoError(t, err) + assert.GreaterOrEqual(t, c, 0) + + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "text/plain") + })) + + client, err := actions.NewClient(server.configURLForOrg("my-org"), auth) + require.NoError(t, err) + + _, err = client.GetMessage(ctx, server.URL, token, 0, -1) + require.Error(t, err) + // Ensure we don't send requests with negative capacity + assert.False(t, errors.Is(err, &actions.ActionsError{})) + + _, err = client.GetMessage(ctx, server.URL, token, 0, 0) + assert.Error(t, err) + var expectedErr *actions.ActionsError + assert.ErrorAs(t, err, &expectedErr) + assert.Equal(t, http.StatusBadRequest, expectedErr.StatusCode) + }) } func TestDeleteMessage(t *testing.T) { diff --git a/github/actions/errors.go b/github/actions/errors.go index 736520bd..86ba5b6d 100644 --- a/github/actions/errors.go +++ b/github/actions/errors.go @@ -47,7 +47,6 @@ func (e *ActionsError) IsException(target string) bool { if ex, ok := e.Err.(*ActionsExceptionError); ok { return strings.Contains(ex.ExceptionName, target) } - return false } diff --git a/github/actions/fake/client.go b/github/actions/fake/client.go index de51a278..a108b902 100644 --- a/github/actions/fake/client.go +++ b/github/actions/fake/client.go @@ -259,7 +259,7 @@ func (f *FakeClient) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int return f.getAcquirableJobsResult.AcquirableJobList, f.getAcquirableJobsResult.err } -func (f *FakeClient) GetMessage(ctx context.Context, messageQueueUrl, 
messageQueueAccessToken string, lastMessageId int64) (*actions.RunnerScaleSetMessage, error) { +func (f *FakeClient) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) { return f.getMessageResult.RunnerScaleSetMessage, f.getMessageResult.err } diff --git a/github/actions/mock_ActionsService.go b/github/actions/mock_ActionsService.go index 0216cf30..849f2c19 100644 --- a/github/actions/mock_ActionsService.go +++ b/github/actions/mock_ActionsService.go @@ -186,25 +186,25 @@ func (_m *MockActionsService) GetAcquirableJobs(ctx context.Context, runnerScale return r0, r1 } -// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId -func (_m *MockActionsService) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64) (*RunnerScaleSetMessage, error) { - ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) +// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity +func (_m *MockActionsService) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) { + ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) var r0 *RunnerScaleSetMessage var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) (*RunnerScaleSetMessage, error)); ok { - return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) (*RunnerScaleSetMessage, error)); ok { + return rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } - if rf, ok := ret.Get(0).(func(context.Context, string, string, int64) *RunnerScaleSetMessage); ok { - r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, string, string, int64, int) *RunnerScaleSetMessage); ok { + r0 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*RunnerScaleSetMessage) } } - if rf, ok := ret.Get(1).(func(context.Context, string, string, int64) error); ok { - r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId) + if rf, ok := ret.Get(1).(func(context.Context, string, string, int64, int) error); ok { + r1 = rf(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity) } else { r1 = ret.Error(1) } diff --git a/github/actions/mock_SessionService.go b/github/actions/mock_SessionService.go index ed403eee..f587cac8 100644 --- a/github/actions/mock_SessionService.go +++ b/github/actions/mock_SessionService.go @@ -67,25 +67,25 @@ func (_m *MockSessionService) DeleteMessage(ctx context.Context, messageId int64 return r0 } -// GetMessage provides a mock function with given fields: ctx, lastMessageId -func (_m *MockSessionService) GetMessage(ctx context.Context, lastMessageId int64) (*RunnerScaleSetMessage, error) { - ret := _m.Called(ctx, lastMessageId) +// GetMessage provides a mock function with given fields: ctx, lastMessageId, maxCapacity +func (_m *MockSessionService) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) { + ret := _m.Called(ctx, lastMessageId, maxCapacity) var 
r0 *RunnerScaleSetMessage var r1 error - if rf, ok := ret.Get(0).(func(context.Context, int64) (*RunnerScaleSetMessage, error)); ok { - return rf(ctx, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, int64, int) (*RunnerScaleSetMessage, error)); ok { + return rf(ctx, lastMessageId, maxCapacity) } - if rf, ok := ret.Get(0).(func(context.Context, int64) *RunnerScaleSetMessage); ok { - r0 = rf(ctx, lastMessageId) + if rf, ok := ret.Get(0).(func(context.Context, int64, int) *RunnerScaleSetMessage); ok { + r0 = rf(ctx, lastMessageId, maxCapacity) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*RunnerScaleSetMessage) } } - if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { - r1 = rf(ctx, lastMessageId) + if rf, ok := ret.Get(1).(func(context.Context, int64, int) error); ok { + r1 = rf(ctx, lastMessageId, maxCapacity) } else { r1 = ret.Error(1) } diff --git a/github/actions/sessionservice.go b/github/actions/sessionservice.go index 6ae20fa0..21311aa0 100644 --- a/github/actions/sessionservice.go +++ b/github/actions/sessionservice.go @@ -7,7 +7,7 @@ import ( //go:generate mockery --inpackage --name=SessionService type SessionService interface { - GetMessage(ctx context.Context, lastMessageId int64) (*RunnerScaleSetMessage, error) + GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) DeleteMessage(ctx context.Context, messageId int64) error AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) io.Closer From f965dfef73dcbec055ec4020b3d22983e163681f Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Tue, 16 Apr 2024 21:29:03 +0200 Subject: [PATCH 04/19] Shutdown metrics server when listener exits (#3445) --- cmd/ghalistener/app/app.go | 8 ++++++-- cmd/ghalistener/listener/listener.go | 4 ++-- cmd/ghalistener/listener/listener_test.go | 4 ++-- cmd/ghalistener/metrics/metrics.go | 5 ++++- 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/cmd/ghalistener/app/app.go b/cmd/ghalistener/app/app.go index 76fc3824..e21703c9 100644 --- a/cmd/ghalistener/app/app.go +++ b/cmd/ghalistener/app/app.go @@ -117,15 +117,19 @@ func (app *App) Run(ctx context.Context) error { } g, ctx := errgroup.WithContext(ctx) + metricsCtx, cancelMetrics := context.WithCancelCause(ctx) + g.Go(func() error { app.logger.Info("Starting listener") - return app.listener.Listen(ctx, app.worker) + listnerErr := app.listener.Listen(ctx, app.worker) + cancelMetrics(fmt.Errorf("Listener exited: %w", listnerErr)) + return listnerErr }) if app.metrics != nil { g.Go(func() error { app.logger.Info("Starting metrics server") - return app.metrics.ListenAndServe(ctx) + return app.metrics.ListenAndServe(metricsCtx) }) } diff --git a/cmd/ghalistener/listener/listener.go b/cmd/ghalistener/listener/listener.go index f433cbb9..79009726 100644 --- a/cmd/ghalistener/listener/listener.go +++ b/cmd/ghalistener/listener/listener.go @@ -174,8 +174,8 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error { continue } - // New context is created to avoid cancelation during message handling. - if err := l.handleMessage(context.Background(), handler, msg); err != nil { + // Remove cancellation from the context to avoid cancelling the message handling. 
+ if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil { return fmt.Errorf("failed to handle message: %w", err) } } diff --git a/cmd/ghalistener/listener/listener_test.go b/cmd/ghalistener/listener/listener_test.go index c39a0773..d2af2b03 100644 --- a/cmd/ghalistener/listener/listener_test.go +++ b/cmd/ghalistener/listener/listener_test.go @@ -484,8 +484,8 @@ func TestListener_Listen(t *testing.T) { ). Once() - // Ensure delete message is called with background context - client.On("DeleteMessage", context.Background(), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + // Ensure delete message is called without cancel + client.On("DeleteMessage", context.WithoutCancel(ctx), mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() config.Client = client diff --git a/cmd/ghalistener/metrics/metrics.go b/cmd/ghalistener/metrics/metrics.go index ec46ed2c..2940dd2f 100644 --- a/cmd/ghalistener/metrics/metrics.go +++ b/cmd/ghalistener/metrics/metrics.go @@ -4,6 +4,7 @@ import ( "context" "net/http" "strconv" + "time" "github.com/actions/actions-runner-controller/github/actions" "github.com/go-logr/logr" @@ -338,7 +339,9 @@ func (e *exporter) ListenAndServe(ctx context.Context) error { e.logger.Info("starting metrics server", "addr", e.srv.Addr) go func() { <-ctx.Done() - e.logger.Info("stopping metrics server") + e.logger.Info("stopping metrics server", "err", ctx.Err()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() e.srv.Shutdown(ctx) }() return e.srv.ListenAndServe() From 9e191cdd21621f4e43023e0bdbbd2ff9b139c8a6 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Wed, 17 Apr 2024 10:51:28 +0200 Subject: [PATCH 05/19] Prepare 0.9.1 release (#3448) --- .github/workflows/gha-e2e-tests.yaml | 2 +- charts/gha-runner-scale-set-controller/Chart.yaml | 4 ++-- charts/gha-runner-scale-set/Chart.yaml | 4 ++-- docs/gha-runner-scale-set-controller/README.md | 10 ++++++++++ 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.github/workflows/gha-e2e-tests.yaml b/.github/workflows/gha-e2e-tests.yaml index de3183e9..93879063 100644 --- a/.github/workflows/gha-e2e-tests.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ -16,7 +16,7 @@ env: TARGET_ORG: actions-runner-controller TARGET_REPO: arc_e2e_test_dummy IMAGE_NAME: "arc-test-image" - IMAGE_VERSION: "0.9.0" + IMAGE_VERSION: "0.9.1" concurrency: # This will make sure we only apply the concurrency limits on pull requests diff --git a/charts/gha-runner-scale-set-controller/Chart.yaml b/charts/gha-runner-scale-set-controller/Chart.yaml index 557bb8c6..e1aec2ca 100644 --- a/charts/gha-runner-scale-set-controller/Chart.yaml +++ b/charts/gha-runner-scale-set-controller/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.9.0 +version: 0.9.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.9.0" +appVersion: "0.9.1" home: https://github.com/actions/actions-runner-controller diff --git a/charts/gha-runner-scale-set/Chart.yaml b/charts/gha-runner-scale-set/Chart.yaml index 6b5b2d63..934d41c1 100644 --- a/charts/gha-runner-scale-set/Chart.yaml +++ b/charts/gha-runner-scale-set/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.9.0 +version: 0.9.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.9.0" +appVersion: "0.9.1" home: https://github.com/actions/actions-runner-controller diff --git a/docs/gha-runner-scale-set-controller/README.md b/docs/gha-runner-scale-set-controller/README.md index 194ff43f..f9044acf 100644 --- a/docs/gha-runner-scale-set-controller/README.md +++ b/docs/gha-runner-scale-set-controller/README.md @@ -43,6 +43,16 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h ## Changelog +### v0.9.1 + +#### Major changes + +1. Shutdown metrics server when listener exits [#3445](https://github.com/actions/actions-runner-controller/pull/3445) +1. Propagate max capacity information to the actions back-end [#3431](https://github.com/actions/actions-runner-controller/pull/3431) +1. Refactor actions client error to include request id [#3430](https://github.com/actions/actions-runner-controller/pull/3430) +1. Include self correction on empty batch and avoid removing pending runners when cluster is busy [#3426](https://github.com/actions/actions-runner-controller/pull/3426) +1. Add topologySpreadConstraint to gha-runner-scale-set-controller chart [#3405](https://github.com/actions/actions-runner-controller/pull/3405) + ### v0.9.0 #### ⚠️ Warning From 109750f8164f1de788aff0fb223eee4a0634ad49 Mon Sep 17 00:00:00 2001 From: Bryan Peterson Date: Tue, 23 Apr 2024 11:19:32 +0200 Subject: [PATCH 06/19] propogate arbitrary labels from runnersets to all created resources (#3157) --- .../actions.github.com/resourcebuilder.go | 54 +++++++++++-------- .../resourcebuilder_test.go | 3 ++ 2 files changed, 35 insertions(+), 22 deletions(-) diff --git a/controllers/actions.github.com/resourcebuilder.go b/controllers/actions.github.com/resourcebuilder.go index f6e54b29..49bdcac0 100644 --- a/controllers/actions.github.com/resourcebuilder.go +++ b/controllers/actions.github.com/resourcebuilder.go @@ -85,13 +85,13 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1. 
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners } - labels := map[string]string{ + labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{ LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, LabelKeyKubernetesComponent: "runner-scale-set-listener", LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], - } + }) annotations := map[string]string{ annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), @@ -411,10 +411,10 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace, - Labels: map[string]string{ + Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, - }, + }), }, } } @@ -426,13 +426,13 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1. ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - Labels: map[string]string{ + Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, labelKeyListenerNamespace: autoscalingListener.Namespace, labelKeyListenerName: autoscalingListener.Name, "role-policy-rules-hash": rulesHash, - }, + }), }, Rules: rules, } @@ -460,14 +460,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1 ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, - Labels: map[string]string{ + Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, labelKeyListenerNamespace: autoscalingListener.Namespace, labelKeyListenerName: autoscalingListener.Name, "role-binding-role-ref-hash": roleRefHash, "role-binding-subject-hash": subjectHash, - }, + }), }, RoleRef: roleRef, Subjects: subjects, @@ -483,11 +483,11 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v ObjectMeta: metav1.ObjectMeta{ Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace, - Labels: map[string]string{ + Labels: mergeLabels(autoscalingListener.Labels, map[string]string{ LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName, "secret-data-hash": dataHash, - }, + }), }, Data: secret.DeepCopy().Data, } @@ -502,13 +502,13 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A } runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() - labels := map[string]string{ + labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{ LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, LabelKeyKubernetesComponent: "runner-set", LabelKeyKubernetesVersion: 
autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, - } + }) if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil { return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err) @@ -547,18 +547,14 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { labels := make(map[string]string) - for _, key := range commonLabelKeys { - switch key { - case LabelKeyKubernetesComponent: - labels[key] = "runner" - default: - v, ok := ephemeralRunnerSet.Labels[key] - if !ok { - continue - } - labels[key] = v + for k, v := range ephemeralRunnerSet.Labels { + if k == LabelKeyKubernetesComponent { + labels[k] = "runner" + } else { + labels[k] = v } } + annotations := make(map[string]string) for key, val := range ephemeralRunnerSet.Annotations { annotations[key] = val @@ -751,3 +747,17 @@ func trimLabelValue(val string) string { } return val } + +func mergeLabels(base, overwrite map[string]string) map[string]string { + mergedLabels := map[string]string{} + + for k, v := range base { + mergedLabels[k] = v + } + + for k, v := range overwrite { + mergedLabels[k] = v + } + + return mergedLabels +} diff --git a/controllers/actions.github.com/resourcebuilder_test.go b/controllers/actions.github.com/resourcebuilder_test.go index 9039de95..52ab19b4 100644 --- a/controllers/actions.github.com/resourcebuilder_test.go +++ b/controllers/actions.github.com/resourcebuilder_test.go @@ -21,6 +21,7 @@ func TestLabelPropagation(t *testing.T) { Labels: map[string]string{ LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, LabelKeyKubernetesVersion: "0.2.0", + "arbitrary-label": "random-value", }, Annotations: map[string]string{ runnerScaleSetIdAnnotationKey: "1", @@ -47,6 +48,7 @@ func TestLabelPropagation(t *testing.T) { assert.Equal(t, "repo", ephemeralRunnerSet.Labels[LabelKeyGitHubRepository]) assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]) assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]) + assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], ephemeralRunnerSet.Labels["arbitrary-label"]) listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil) require.NoError(t, err) @@ -59,6 +61,7 @@ func TestLabelPropagation(t *testing.T) { assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) assert.Equal(t, "org", listener.Labels[LabelKeyGitHubOrganization]) assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository]) + assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"]) listenerServiceAccount := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ From 49490c4421aa8d58a8eb375fe7a539bdfe28b7a6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 24 Apr 2024 12:21:30 +0100 Subject: [PATCH 07/19] Updates: runner to v2.316.0 (#3463) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- 
test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 6cc0ac59..5a2ba48a 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev COMMIT_SHA = $(shell git rev-parse HEAD) -RUNNER_VERSION ?= 2.315.0 +RUNNER_VERSION ?= 2.316.0 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index 06942fb9..7e835dfc 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.315.0 +RUNNER_VERSION ?= 2.316.0 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0 DOCKER_VERSION ?= 24.0.7 diff --git a/runner/VERSION b/runner/VERSION index b7ab93f4..ffba9673 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ -RUNNER_VERSION=2.315.0 +RUNNER_VERSION=2.316.0 RUNNER_CONTAINER_HOOKS_VERSION=0.6.0 \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 1863fe2e..f0773bf3 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.315.0" + RunnerVersion = "2.316.0" RunnerContainerHooksVersion = "0.6.0" ) From d9af241a7dc8d78a4ed4567fcde2d103a8aa8801 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 12:55:24 +0200 Subject: [PATCH 08/19] Bump golang.org/x/oauth2 from 0.15.0 to 0.19.0 (#3441) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nikola Jokic --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 49969c6c..299c3d5f 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 golang.org/x/net v0.24.0 - golang.org/x/oauth2 v0.15.0 + golang.org/x/oauth2 v0.19.0 golang.org/x/sync v0.6.0 gomodules.xyz/jsonpatch/v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index a01084cd..46f673ad 100644 --- a/go.sum +++ b/go.sum @@ -255,8 +255,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 87f2e00971fbe267e676a9639a15576c8013263f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 12:56:05 +0200 Subject: [PATCH 09/19] Bump go.uber.org/zap from 1.26.0 to 1.27.0 
(#3442) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 299c3d5f..3b40f12a 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/teambition/rrule-go v1.8.2 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 golang.org/x/net v0.24.0 golang.org/x/oauth2 v0.19.0 golang.org/x/sync v0.6.0 diff --git a/go.sum b/go.sum index 46f673ad..f9d4e3a6 100644 --- a/go.sum +++ b/go.sum @@ -221,12 +221,12 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= From 2889029bc51913180701f964dbe94959fcb14595 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 12:56:43 +0200 Subject: [PATCH 10/19] Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#3462) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3b40f12a..e4ae45e0 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.30.0 + github.com/onsi/gomega v1.33.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.17.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index f9d4e3a6..df3e57ef 100644 --- a/go.sum +++ b/go.sum @@ -173,8 +173,8 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8 github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod 
h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= +github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= From a1b8e0cc3d280cfae73a4c1dc24dc49da371d1d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 08:53:19 +0200 Subject: [PATCH 11/19] Bump golang.org/x/sync from 0.6.0 to 0.7.0 (#3482) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e4ae45e0..74a42a3d 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/net v0.24.0 golang.org/x/oauth2 v0.19.0 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 gomodules.xyz/jsonpatch/v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.4 diff --git a/go.sum b/go.sum index df3e57ef..f1dd2cc9 100644 --- a/go.sum +++ b/go.sum @@ -263,8 +263,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 51c70a64c3dccb27cdd381472f938e28cb7ed16e Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 13 May 2024 14:16:36 +0200 Subject: [PATCH 12/19] Include controller version in logs (#3473) --- main.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/main.go b/main.go index 284920da..cfe14c6c 100644 --- a/main.go +++ b/main.go @@ -256,7 +256,7 @@ func main() { if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{ Client: mgr.GetClient(), - Log: log.WithName("AutoscalingRunnerSet"), + Log: log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version), Scheme: mgr.GetScheme(), ControllerNamespace: managerNamespace, DefaultRunnerScaleSetListenerImage: managerImage, @@ -270,7 +270,7 @@ func main() { if err = (&actionsgithubcom.EphemeralRunnerReconciler{ Client: mgr.GetClient(), - Log: log.WithName("EphemeralRunner"), + Log: log.WithName("EphemeralRunner").WithValues("version", build.Version), Scheme: mgr.GetScheme(), ActionsClient: actionsMultiClient, }).SetupWithManager(mgr); err != nil { @@ -280,7 +280,7 @@ func main() { if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{ Client: mgr.GetClient(), - Log: 
log.WithName("EphemeralRunnerSet"), + Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version), Scheme: mgr.GetScheme(), ActionsClient: actionsMultiClient, PublishMetrics: metricsAddr != "0", @@ -291,7 +291,7 @@ func main() { if err = (&actionsgithubcom.AutoscalingListenerReconciler{ Client: mgr.GetClient(), - Log: log.WithName("AutoscalingListener"), + Log: log.WithName("AutoscalingListener").WithValues("version", build.Version), Scheme: mgr.GetScheme(), ListenerMetricsAddr: listenerMetricsAddr, ListenerMetricsEndpoint: listenerMetricsEndpoint, @@ -441,7 +441,7 @@ func main() { } } - log.Info("starting manager") + log.Info("starting manager", "version", build.Version) if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { log.Error(err, "problem running manager") os.Exit(1) From a6d87c46cd01d5ded0c547793ead736fbdd47061 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 11:24:14 +0200 Subject: [PATCH 13/19] Updates: runner to v2.316.1 (#3496) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- Makefile | 2 +- runner/Makefile | 2 +- runner/VERSION | 2 +- test/e2e/e2e_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5a2ba48a..684f13e7 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ endif DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) VERSION ?= dev COMMIT_SHA = $(shell git rev-parse HEAD) -RUNNER_VERSION ?= 2.316.0 +RUNNER_VERSION ?= 2.316.1 TARGETPLATFORM ?= $(shell arch) RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_TAG ?= ${VERSION} diff --git a/runner/Makefile b/runner/Makefile index 7e835dfc..26ece35c 100644 --- a/runner/Makefile +++ b/runner/Makefile @@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless OS_IMAGE ?= ubuntu-22.04 TARGETPLATFORM ?= $(shell arch) -RUNNER_VERSION ?= 2.316.0 +RUNNER_VERSION ?= 2.316.1 RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0 DOCKER_VERSION ?= 24.0.7 diff --git a/runner/VERSION b/runner/VERSION index ffba9673..f62551d8 100644 --- a/runner/VERSION +++ b/runner/VERSION @@ -1,2 +1,2 @@ -RUNNER_VERSION=2.316.0 +RUNNER_VERSION=2.316.1 RUNNER_CONTAINER_HOOKS_VERSION=0.6.0 \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index f0773bf3..12173fa5 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -36,7 +36,7 @@ var ( testResultCMNamePrefix = "test-result-" - RunnerVersion = "2.316.0" + RunnerVersion = "2.316.1" RunnerContainerHooksVersion = "0.6.0" ) From ea13873f148522bb0a5520c1213a9b8adb6b03db Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 17 May 2024 13:06:57 +0200 Subject: [PATCH 14/19] Remove service monitor that is not used in controller chart (#3526) --- charts/gha-runner-scale-set-controller/templates/_helpers.tpl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl index b4adbd00..075d21ae 100644 --- a/charts/gha-runner-scale-set-controller/templates/_helpers.tpl +++ b/charts/gha-runner-scale-set-controller/templates/_helpers.tpl @@ -126,7 +126,3 @@ Create the name of the service account to use {{- end }} {{- $names | join ","}} {{- end }} - -{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}} -{{- include "gha-runner-scale-set-controller.fullname" . 
}}-service-monitor -{{- end }} From 9b51f25800f2342ef11c97c9e4a117a064147bf6 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 17 May 2024 14:37:13 +0200 Subject: [PATCH 15/19] Rename imports in tests to remove double import and to improve readability (#3455) --- .../autoscalinglistener_controller_test.go | 84 +++++------ .../ephemeralrunnerset_controller_test.go | 139 +++++++++--------- 2 files changed, 111 insertions(+), 112 deletions(-) diff --git a/controllers/actions.github.com/autoscalinglistener_controller_test.go b/controllers/actions.github.com/autoscalinglistener_controller_test.go index 63b5c742..994df5d8 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller_test.go +++ b/controllers/actions.github.com/autoscalinglistener_controller_test.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" ) const ( @@ -34,9 +34,9 @@ var _ = Describe("Test AutoScalingListener controller", func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret - var autoscalingListener *actionsv1alpha1.AutoscalingListener + var autoscalingListener *v1alpha1.AutoscalingListener BeforeEach(func() { ctx = context.Background() @@ -53,12 +53,12 @@ var _ = Describe("Test AutoScalingListener controller", func() { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, MaxRunners: &max, @@ -79,12 +79,12 @@ var _ = Describe("Test AutoScalingListener controller", func() { err = k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asl", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingListenerSpec{ + Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 1, @@ -119,7 +119,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { ).Should(Succeed(), "Config secret should be created") // Check if finalizer is added - created := new(actionsv1alpha1.AutoscalingListener) + created := new(v1alpha1.AutoscalingListener) Eventually( func() (string, error) { err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) @@ -298,7 +298,7 @@ var _ = Describe("Test AutoScalingListener controller", func() { // The AutoScalingListener should be deleted Eventually( func() error { - listenerList := new(actionsv1alpha1.AutoscalingListenerList) + listenerList := new(v1alpha1.AutoscalingListenerList) err := k8sClient.List(ctx, listenerList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{".metadata.name": 
autoscalingListener.Name}) if err != nil { return err @@ -415,9 +415,9 @@ var _ = Describe("Test AutoScalingListener customization", func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret - var autoscalingListener *actionsv1alpha1.AutoscalingListener + var autoscalingListener *v1alpha1.AutoscalingListener var runAsUser int64 = 1001 @@ -458,12 +458,12 @@ var _ = Describe("Test AutoScalingListener customization", func() { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, MaxRunners: &max, @@ -484,12 +484,12 @@ var _ = Describe("Test AutoScalingListener customization", func() { err = k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asltest", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingListenerSpec{ + Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 1, @@ -512,7 +512,7 @@ var _ = Describe("Test AutoScalingListener customization", func() { Context("When creating a new AutoScalingListener", func() { It("It should create customized pod with applied configuration", func() { // Check if finalizer is added - created := new(actionsv1alpha1.AutoscalingListener) + created := new(v1alpha1.AutoscalingListener) Eventually( func() (string, error) { err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, created) @@ -570,19 +570,19 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret - var autoscalingListener *actionsv1alpha1.AutoscalingListener + var autoscalingListener *v1alpha1.AutoscalingListener - createRunnerSetAndListener := func(proxy *actionsv1alpha1.ProxyConfig) { + createRunnerSetAndListener := func(proxy *v1alpha1.ProxyConfig) { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, MaxRunners: &max, @@ -604,12 +604,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { err := k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &v1alpha1.AutoscalingListener{ ObjectMeta: 
metav1.ObjectMeta{ Name: "test-asl", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingListenerSpec{ + Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 1, @@ -658,12 +658,12 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() { err := k8sClient.Create(ctx, proxyCredentials) Expect(err).NotTo(HaveOccurred(), "failed to create proxy credentials secret") - proxy := &actionsv1alpha1.ProxyConfig{ - HTTP: &actionsv1alpha1.ProxyServerConfig{ + proxy := &v1alpha1.ProxyConfig{ + HTTP: &v1alpha1.ProxyServerConfig{ Url: "http://localhost:8080", CredentialSecretRef: "proxy-credentials", }, - HTTPS: &actionsv1alpha1.ProxyServerConfig{ + HTTPS: &v1alpha1.ProxyServerConfig{ Url: "https://localhost:8443", CredentialSecretRef: "proxy-credentials", }, @@ -766,19 +766,19 @@ var _ = Describe("Test AutoScalingListener controller with template modification var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret - var autoscalingListener *actionsv1alpha1.AutoscalingListener + var autoscalingListener *v1alpha1.AutoscalingListener createRunnerSetAndListener := func(listenerTemplate *corev1.PodTemplateSpec) { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, MaxRunners: &max, @@ -800,12 +800,12 @@ var _ = Describe("Test AutoScalingListener controller with template modification err := k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asl", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingListenerSpec{ + Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 1, @@ -915,9 +915,9 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var autoscalingRunnerSet *actionsv1alpha1.AutoscalingRunnerSet + var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet var configSecret *corev1.Secret - var autoscalingListener *actionsv1alpha1.AutoscalingListener + var autoscalingListener *v1alpha1.AutoscalingListener var rootCAConfigMap *corev1.ConfigMap BeforeEach(func() { @@ -955,16 +955,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { min := 1 max := 10 - autoscalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{ + autoscalingRunnerSet = &v1alpha1.AutoscalingRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{ + Spec: v1alpha1.AutoscalingRunnerSetSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, - GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ - CertificateFrom: 
&actionsv1alpha1.TLSCertificateSource{ + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: rootCAConfigMap.Name, @@ -991,16 +991,16 @@ var _ = Describe("Test GitHub Server TLS configuration", func() { err = k8sClient.Create(ctx, autoscalingRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet") - autoscalingListener = &actionsv1alpha1.AutoscalingListener{ + autoscalingListener = &v1alpha1.AutoscalingListener{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asl", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.AutoscalingListenerSpec{ + Spec: v1alpha1.AutoscalingListenerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, - GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ - CertificateFrom: &actionsv1alpha1.TLSCertificateSource{ + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ + CertificateFrom: &v1alpha1.TLSCertificateSource{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: rootCAConfigMap.Name, diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go index 79ad2d6e..3c74b72e 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller_test.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller_test.go @@ -23,8 +23,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" - v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions/fake" "github.com/actions/actions-runner-controller/github/actions/testserver" @@ -40,7 +39,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet var configSecret *corev1.Secret BeforeEach(func() { @@ -57,13 +56,13 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err := controller.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred(), "failed to setup controller") - ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ - EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 100, @@ -90,7 +89,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Context("When creating a new EphemeralRunnerSet", func() { It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() { // Check if finalizer is added - created := new(actionsv1alpha1.EphemeralRunnerSet) + created := new(v1alpha1.EphemeralRunnerSet) Eventually( func() (string, error) { 
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) @@ -108,7 +107,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if the number of ephemeral runners are stay 0 Consistently( func() (int, error) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -122,7 +121,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if the status stay 0 Consistently( func() (int, error) { - runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + runnerSet := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) if err != nil { return -1, err @@ -142,7 +141,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if the number of ephemeral runners are created Eventually( func() (int, error) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -176,7 +175,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if the status is updated Eventually( func() (int, error) { - runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + runnerSet := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet) if err != nil { return -1, err @@ -191,7 +190,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Context("When deleting a new EphemeralRunnerSet", func() { It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() { - created := new(actionsv1alpha1.EphemeralRunnerSet) + created := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -204,7 +203,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Wait for the EphemeralRunnerSet to be scaled up Eventually( func() (int, error) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -242,7 +241,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if all ephemeral runners are deleted Eventually( func() (int, error) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -256,7 +255,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Check if the EphemeralRunnerSet is deleted Eventually( func() error { - deleted := new(actionsv1alpha1.EphemeralRunnerSet) + deleted := new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted) if err != nil { if kerrors.IsNotFound(err) { @@ -275,7 +274,7 @@ var _ = Describe("Test 
EphemeralRunnerSet controller", func() { Context("When a new EphemeralRunnerSet scale up and down", func() { It("Should scale up with patch ID 0", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -286,7 +285,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -302,7 +301,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should scale up when patch ID changes", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -313,7 +312,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -327,7 +326,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { ephemeralRunnerSetTestInterval, ).Should(BeEquivalentTo(1), "1 EphemeralRunner should be created") - ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -338,7 +337,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -354,7 +353,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should clean up finished ephemeral runner when scaling down", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -365,7 +364,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ 
-390,7 +389,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // Keep the ephemeral runner until the next patch - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -405,7 +404,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { ).Should(BeEquivalentTo(2), "1 EphemeralRunner should be up") // The listener was slower to patch the completed, but we should still have 1 running - ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -416,7 +415,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -432,7 +431,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should keep finished ephemeral runners until patch id changes", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -443,7 +442,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -468,7 +467,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // confirm they are not deleted - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Consistently( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -484,7 +483,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should handle double scale up", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -495,7 +494,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ 
-520,7 +519,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1])) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") - ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -531,7 +530,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) // We should have 3 runners, and have no Succeeded ones Eventually( func() error { @@ -558,7 +557,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should handle scale down without removing pending runners", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -569,7 +568,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -594,7 +593,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // Wait for these statuses to actually be updated - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() error { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -623,7 +622,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { ).Should(BeNil(), "1 EphemeralRunner should be in Pending and 1 in Succeeded phase") // Scale down to 0, while 1 is still pending. 
This simulates the difference between the desired and actual state - ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -634,7 +633,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) // We should have 1 runner up and pending Eventually( func() error { @@ -678,7 +677,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should kill pending and running runners if they are up for some reason and the batch contains no jobs", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -689,7 +688,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -715,7 +714,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // Wait for these statuses to actually be updated - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() error { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -748,7 +747,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Scale down to 0 with patch ID 0. 
This forces the scale down to self correct on empty batch - ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -759,7 +758,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Consistently( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -786,7 +785,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner") // Now, eventually, they should be deleted - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -803,7 +802,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should replace finished ephemeral runners with new ones", func() { - ers := new(actionsv1alpha1.EphemeralRunnerSet) + ers := new(v1alpha1.EphemeralRunnerSet) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -814,7 +813,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -841,7 +840,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Wait for these statuses to actually be updated - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() error { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -874,7 +873,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { // Now, let's simulate replacement. The desired count is still 2. // This simulates that we got 1 job assigned, and 1 job completed. 
- ers = new(actionsv1alpha1.EphemeralRunnerSet) + ers = new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, ers) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -885,7 +884,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err = k8sClient.Patch(ctx, updated, client.MergeFrom(ers)) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet") - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) Eventually( func() error { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -911,7 +910,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { }) It("Should update status on Ephemeral Runner state changes", func() { - created := new(actionsv1alpha1.EphemeralRunnerSet) + created := new(v1alpha1.EphemeralRunnerSet) Eventually( func() error { return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created) @@ -926,7 +925,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { err := k8sClient.Update(ctx, updated) Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet replica count") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually( func() (bool, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) @@ -1036,7 +1035,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() { Eventually( func() (int, error) { - runnerList = new(actionsv1alpha1.EphemeralRunnerList) + runnerList = new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -1091,7 +1090,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet var configSecret *corev1.Secret BeforeEach(func() { @@ -1126,14 +1125,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( err := k8sClient.Create(ctx, secretCredentials) Expect(err).NotTo(HaveOccurred(), "failed to create secret credentials") - ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ Replicas: 1, - EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ GitHubConfigUrl: "http://example.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 100, @@ -1193,7 +1192,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( ).Should(Succeed(), "compiled / flattened proxy secret should exist") Eventually(func(g Gomega) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners") @@ -1211,7 +1210,7 @@ var _ = Describe("Test EphemeralRunnerSet controller 
with proxy settings", func( // Set pods to PodSucceeded to simulate an actual EphemeralRunner stopping Eventually( func(g Gomega) (int, error) { - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { return -1, err @@ -1293,14 +1292,14 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( proxy.Close() }) - ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ Replicas: 1, - EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ GitHubConfigUrl: "http://example.com/owner/repo", GitHubConfigSecret: configSecret.Name, RunnerScaleSetId: 100, @@ -1327,7 +1326,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( err = k8sClient.Create(ctx, ephemeralRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually(func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { @@ -1346,7 +1345,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func( err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") - runnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + runnerSet := new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, runnerSet) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") @@ -1369,7 +1368,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( var ctx context.Context var mgr ctrl.Manager var autoscalingNS *corev1.Namespace - var ephemeralRunnerSet *actionsv1alpha1.EphemeralRunnerSet + var ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet var configSecret *corev1.Secret var rootCAConfigMap *corev1.ConfigMap @@ -1431,17 +1430,17 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( server.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} server.StartTLS() - ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{ + ephemeralRunnerSet = &v1alpha1.EphemeralRunnerSet{ ObjectMeta: metav1.ObjectMeta{ Name: "test-asrs", Namespace: autoscalingNS.Name, }, - Spec: actionsv1alpha1.EphemeralRunnerSetSpec{ + Spec: v1alpha1.EphemeralRunnerSetSpec{ Replicas: 1, - EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{ + EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ GitHubConfigUrl: server.ConfigURLForOrg("my-org"), GitHubConfigSecret: configSecret.Name, - GitHubServerTLS: &actionsv1alpha1.GitHubServerTLSConfig{ + GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ CertificateFrom: &v1alpha1.TLSCertificateSource{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ @@ -1469,7 +1468,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( err = k8sClient.Create(ctx, ephemeralRunnerSet) Expect(err).NotTo(HaveOccurred(), 
"failed to create EphemeralRunnerSet") - runnerList := new(actionsv1alpha1.EphemeralRunnerList) + runnerList := new(v1alpha1.EphemeralRunnerList) Eventually(func() (int, error) { err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace)) if err != nil { @@ -1491,7 +1490,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func( err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0])) Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status") - currentRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet) + currentRunnerSet := new(v1alpha1.EphemeralRunnerSet) err = k8sClient.Get(ctx, client.ObjectKey{Namespace: ephemeralRunnerSet.Namespace, Name: ephemeralRunnerSet.Name}, currentRunnerSet) Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet") From fa7a4f584ef8e3c7ea9f134f6ba4b1c3ec32d03e Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 17 May 2024 14:42:46 +0200 Subject: [PATCH 16/19] Extract single place to set up indexers (#3454) --- .golangci.yaml | 4 +- Makefile | 2 +- .../autoscalinglistener_controller.go | 24 ------- .../autoscalingrunnerset_controller.go | 21 ------ .../ephemeralrunner_controller.go | 1 - .../ephemeralrunnerset_controller.go | 22 ------ .../actions.github.com/helpers_test.go | 3 + controllers/actions.github.com/indexer.go | 71 +++++++++++++++++++ main.go | 4 ++ 9 files changed, 82 insertions(+), 70 deletions(-) create mode 100644 controllers/actions.github.com/indexer.go diff --git a/.golangci.yaml b/.golangci.yaml index 35520223..eca46937 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,7 +1,9 @@ run: timeout: 3m output: - format: github-actions + formats: + - format: github-actions + path: stdout linters-settings: errcheck: exclude-functions: diff --git a/Makefile b/Makefile index 684f13e7..5f1302af 100644 --- a/Makefile +++ b/Makefile @@ -68,7 +68,7 @@ endif all: manager lint: - docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run + docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run GO_TEST_ARGS ?= -short diff --git a/controllers/actions.github.com/autoscalinglistener_controller.go b/controllers/actions.github.com/autoscalinglistener_controller.go index 50803c91..f35c85e9 100644 --- a/controllers/actions.github.com/autoscalinglistener_controller.go +++ b/controllers/actions.github.com/autoscalinglistener_controller.go @@ -690,30 +690,6 @@ func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListen // SetupWithManager sets up the controller with the Manager. 
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error { - groupVersionIndexer := func(rawObj client.Object) []string { - groupVersion := v1alpha1.GroupVersion.String() - owner := metav1.GetControllerOf(rawObj) - if owner == nil { - return nil - } - - // ...make sure it is owned by this controller - if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" { - return nil - } - - // ...and if so, return it - return []string{owner.Name} - } - - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil { - return err - } - - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil { - return err - } - labelBasedWatchFunc := func(_ context.Context, obj client.Object) []reconcile.Request { var requests []reconcile.Request labels := obj.GetLabels() diff --git a/controllers/actions.github.com/autoscalingrunnerset_controller.go b/controllers/actions.github.com/autoscalingrunnerset_controller.go index 2ed654e8..f87a11af 100644 --- a/controllers/actions.github.com/autoscalingrunnerset_controller.go +++ b/controllers/actions.github.com/autoscalingrunnerset_controller.go @@ -30,7 +30,6 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -759,26 +758,6 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Con // SetupWithManager sets up the controller with the Manager. func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { - groupVersionIndexer := func(rawObj client.Object) []string { - groupVersion := v1alpha1.GroupVersion.String() - owner := metav1.GetControllerOf(rawObj) - if owner == nil { - return nil - } - - // ...make sure it is owned by this controller - if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" { - return nil - } - - // ...and if so, return it - return []string{owner.Name} - } - - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.AutoscalingRunnerSet{}). Owns(&v1alpha1.EphemeralRunnerSet{}). diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go index 8765c1a0..6da084b7 100644 --- a/controllers/actions.github.com/ephemeralrunner_controller.go +++ b/controllers/actions.github.com/ephemeralrunner_controller.go @@ -815,7 +815,6 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, // SetupWithManager sets up the controller with the Manager. func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { - // TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{} return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.EphemeralRunner{}). Owns(&corev1.Pod{}). 
diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index b462e177..4c3692b8 100644 --- a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -574,28 +574,6 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte // SetupWithManager sets up the controller with the Manager. func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { - // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups. - if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string { - groupVersion := v1alpha1.GroupVersion.String() - - // grab the job object, extract the owner... - ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner) - owner := metav1.GetControllerOf(ephemeralRunner) - if owner == nil { - return nil - } - - // ...make sure it is owned by this controller - if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" { - return nil - } - - // ...and if so, return it - return []string{owner.Name} - }); err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.EphemeralRunnerSet{}). Owns(&v1alpha1.EphemeralRunner{}). diff --git a/controllers/actions.github.com/helpers_test.go b/controllers/actions.github.com/helpers_test.go index 99f5d411..5594280f 100644 --- a/controllers/actions.github.com/helpers_test.go +++ b/controllers/actions.github.com/helpers_test.go @@ -18,6 +18,9 @@ const defaultGitHubToken = "gh_token" func startManagers(t ginkgo.GinkgoTInterface, first manager.Manager, others ...manager.Manager) { for _, mgr := range append([]manager.Manager{first}, others...) 
{ + if err := SetupIndexers(mgr); err != nil { + t.Fatalf("failed to setup indexers: %v", err) + } ctx, cancel := context.WithCancel(context.Background()) g, ctx := errgroup.WithContext(ctx) diff --git a/controllers/actions.github.com/indexer.go b/controllers/actions.github.com/indexer.go new file mode 100644 index 00000000..466c9f14 --- /dev/null +++ b/controllers/actions.github.com/indexer.go @@ -0,0 +1,71 @@ +package actionsgithubcom + +import ( + "context" + "slices" + + v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func SetupIndexers(mgr ctrl.Manager) error { + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &corev1.Pod{}, + resourceOwnerKey, + newGroupVersionOwnerKindIndexer("AutoscalingListener", "EphemeralRunner"), + ); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &corev1.ServiceAccount{}, + resourceOwnerKey, + newGroupVersionOwnerKindIndexer("AutoscalingListener"), + ); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &v1alpha1.EphemeralRunnerSet{}, + resourceOwnerKey, + newGroupVersionOwnerKindIndexer("AutoscalingRunnerSet"), + ); err != nil { + return err + } + + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &v1alpha1.EphemeralRunner{}, + resourceOwnerKey, + newGroupVersionOwnerKindIndexer("EphemeralRunnerSet"), + ); err != nil { + return err + } + + return nil +} + +func newGroupVersionOwnerKindIndexer(ownerKind string, otherOwnerKinds ...string) client.IndexerFunc { + owners := append([]string{ownerKind}, otherOwnerKinds...) + return func(o client.Object) []string { + groupVersion := v1alpha1.GroupVersion.String() + owner := metav1.GetControllerOfNoCopy(o) + if owner == nil { + return nil + } + + // ...make sure it is owned by this controller + if owner.APIVersion != groupVersion || !slices.Contains(owners, owner.Kind) { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + } +} diff --git a/main.go b/main.go index cfe14c6c..3392377c 100644 --- a/main.go +++ b/main.go @@ -239,6 +239,10 @@ func main() { } if autoScalingRunnerSetOnly { + if err := actionsgithubcom.SetupIndexers(mgr); err != nil { + log.Error(err, "unable to setup indexers") + os.Exit(1) + } managerImage := os.Getenv("CONTROLLER_MANAGER_CONTAINER_IMAGE") if managerImage == "" { log.Error(err, "unable to obtain listener image") From ab92e4edc305917c21b4d6d751da22c93bb373dd Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 17 May 2024 15:12:16 +0200 Subject: [PATCH 17/19] Re-use the last desired patch on empty batch (#3453) --- cmd/ghalistener/worker/worker.go | 85 +++-- cmd/ghalistener/worker/worker_test.go | 326 ++++++++++++++++++ .../ephemeralrunnerset_controller.go | 3 - 3 files changed, 376 insertions(+), 38 deletions(-) create mode 100644 cmd/ghalistener/worker/worker_test.go diff --git a/cmd/ghalistener/worker/worker.go b/cmd/ghalistener/worker/worker.go index 25fb90e1..9d6266bf 100644 --- a/cmd/ghalistener/worker/worker.go +++ b/cmd/ghalistener/worker/worker.go @@ -38,20 +38,20 @@ type Config struct { // The Worker's role is to process the messages it receives from the listener. // It then initiates Kubernetes API requests to carry out the necessary actions. 
type Worker struct { - clientset *kubernetes.Clientset - config Config - lastPatch int - lastPatchID int - logger *logr.Logger + clientset *kubernetes.Clientset + config Config + lastPatch int + patchSeq int + logger *logr.Logger } var _ listener.Handler = (*Worker)(nil) func New(config Config, options ...Option) (*Worker, error) { w := &Worker{ - config: config, - lastPatch: -1, - lastPatchID: -1, + config: config, + lastPatch: -1, + patchSeq: -1, } conf, err := rest.InClusterConfig() @@ -163,27 +163,8 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart // The function then scales the ephemeral runner set by applying the merge patch. // Finally, it logs the scaled ephemeral runner set details and returns nil if successful. // If any error occurs during the process, it returns an error with a descriptive message. -func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) { - // Max runners should always be set by the resource builder either to the configured value, - // or the maximum int32 (resourcebuilder.newAutoScalingListener()). - targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners) - - logValues := []any{ - "assigned job", count, - "decision", targetRunnerCount, - "min", w.config.MinRunners, - "max", w.config.MaxRunners, - "currentRunnerCount", w.lastPatch, - "jobsCompleted", jobsCompleted, - } - - if count == 0 && jobsCompleted == 0 { - w.lastPatchID = 0 - } else { - w.lastPatchID++ - } - - w.lastPatch = targetRunnerCount +func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) { + patchID := w.setDesiredWorkerState(count, jobsCompleted) original, err := json.Marshal( &v1alpha1.EphemeralRunnerSet{ @@ -200,8 +181,8 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo patch, err := json.Marshal( &v1alpha1.EphemeralRunnerSet{ Spec: v1alpha1.EphemeralRunnerSetSpec{ - Replicas: targetRunnerCount, - PatchID: w.lastPatchID, + Replicas: w.lastPatch, + PatchID: patchID, }, }, ) @@ -210,14 +191,13 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo return 0, err } + w.logger.Info("Compare", "original", string(original), "patch", string(patch)) mergePatch, err := jsonpatch.CreateMergePatch(original, patch) if err != nil { return 0, fmt.Errorf("failed to create merge patch json for ephemeral runner set: %w", err) } - w.logger.Info("Created merge patch json for EphemeralRunnerSet update", "json", string(mergePatch)) - - w.logger.Info("Scaling ephemeral runner set", logValues...) + w.logger.Info("Preparing EphemeralRunnerSet update", "json", string(mergePatch)) patchedEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{} err = w.clientset.RESTClient(). @@ -238,5 +218,40 @@ func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCo "name", w.config.EphemeralRunnerSetName, "replicas", patchedEphemeralRunnerSet.Spec.Replicas, ) - return targetRunnerCount, nil + return w.lastPatch, nil +} + +// calculateDesiredState calculates the desired state of the worker based on the desired count and the the number of jobs completed. +func (w *Worker) setDesiredWorkerState(count, jobsCompleted int) int { + // Max runners should always be set by the resource builder either to the configured value, + // or the maximum int32 (resourcebuilder.newAutoScalingListener()). 
+ targetRunnerCount := min(w.config.MinRunners+count, w.config.MaxRunners) + w.patchSeq++ + desiredPatchID := w.patchSeq + + if count == 0 && jobsCompleted == 0 { // empty batch + targetRunnerCount = max(w.lastPatch, targetRunnerCount) + if targetRunnerCount == w.config.MinRunners { + // We have an empty batch, and the last patch was the min runners. + // Since this is an empty batch, and we are at the min runners, they should all be idle. + // If controller created few more pods on accident (during scale down events), + // this situation allows the controller to scale down to the min runners. + // However, it is important to keep the patch sequence increasing so we don't ignore one batch. + desiredPatchID = 0 + } + } + + w.lastPatch = targetRunnerCount + + w.logger.Info( + "Calculated target runner count", + "assigned job", count, + "decision", targetRunnerCount, + "min", w.config.MinRunners, + "max", w.config.MaxRunners, + "currentRunnerCount", w.lastPatch, + "jobsCompleted", jobsCompleted, + ) + + return desiredPatchID } diff --git a/cmd/ghalistener/worker/worker_test.go b/cmd/ghalistener/worker/worker_test.go new file mode 100644 index 00000000..d009bccf --- /dev/null +++ b/cmd/ghalistener/worker/worker_test.go @@ -0,0 +1,326 @@ +package worker + +import ( + "math" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" +) + +func TestSetDesiredWorkerState_MinMaxDefaults(t *testing.T) { + logger := logr.Discard() + newEmptyWorker := func() *Worker { + return &Worker{ + config: Config{ + MinRunners: 0, + MaxRunners: math.MaxInt32, + }, + lastPatch: -1, + patchSeq: -1, + logger: &logger, + } + } + + t.Run("init calculate with acquired 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + assert.Equal(t, 0, patchID) + }) + + t.Run("init calculate with acquired 1", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 0) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + assert.Equal(t, 0, patchID) + }) + + t.Run("increment patch when job done", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("increment patch when called with same parameters", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(1, 0) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("calculate desired scale when acquired > 0 and completed > 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 1) + assert.Equal(t, 0, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("re-use the last state when acquired == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("adjust when acquired == 0 and completed == 1", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 1) + assert.Equal(t, 0, patchID) + patchID = 
w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) +} + +func TestSetDesiredWorkerState_MinSet(t *testing.T) { + logger := logr.Discard() + newEmptyWorker := func() *Worker { + return &Worker{ + config: Config{ + MinRunners: 1, + MaxRunners: math.MaxInt32, + }, + lastPatch: -1, + patchSeq: -1, + logger: &logger, + } + } + + t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 1, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("request back to 0 on job done", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("desired patch is 0 but sequence continues on empty batch and min runners", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(3, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 4, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + + patchID = w.setDesiredWorkerState(0, 3) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + + // Empty batch on min runners + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) // forcing the state + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 2, w.patchSeq) + }) + +} + +func TestSetDesiredWorkerState_MaxSet(t *testing.T) { + logger := logr.Discard() + newEmptyWorker := func() *Worker { + return &Worker{ + config: Config{ + MinRunners: 0, + MaxRunners: 5, + }, + lastPatch: -1, + patchSeq: -1, + logger: &logger, + } + } + + t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 1, patchID) + assert.Equal(t, 2, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("request back to 0 on job done", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("scale up to max when count > max", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(6, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 5, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("scale to max when count == max", func(t *testing.T) { + w := newEmptyWorker() + w.setDesiredWorkerState(5, 0) + assert.Equal(t, 5, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("scale to max when count > max and completed > 0", func(t 
*testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(1, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(6, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 5, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("scale back to 0 when count was > max", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(6, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(3, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + + patchID = w.setDesiredWorkerState(0, 3) + assert.Equal(t, 1, patchID) + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + + // Empty batch on min runners + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) // forcing the state + assert.Equal(t, 0, w.lastPatch) + assert.Equal(t, 2, w.patchSeq) + }) +} + +func TestSetDesiredWorkerState_MinMaxSet(t *testing.T) { + logger := logr.Discard() + newEmptyWorker := func() *Worker { + return &Worker{ + config: Config{ + MinRunners: 1, + MaxRunners: 3, + }, + lastPatch: -1, + patchSeq: -1, + logger: &logger, + } + } + + t.Run("initial scale when acquired == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("re-use the old state on count == 0 and completed == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 1, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("scale to min when count == 0", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(2, 0) + assert.Equal(t, 0, patchID) + patchID = w.setDesiredWorkerState(0, 1) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + }) + + t.Run("scale up to max when count > max", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(4, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("scale to max when count == max", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(3, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + }) + + t.Run("force 0 on empty batch and last patch == min runners", func(t *testing.T) { + w := newEmptyWorker() + patchID := w.setDesiredWorkerState(3, 0) + assert.Equal(t, 0, patchID) + assert.Equal(t, 3, w.lastPatch) + assert.Equal(t, 0, w.patchSeq) + + patchID = w.setDesiredWorkerState(0, 3) + assert.Equal(t, 1, patchID) + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 1, w.patchSeq) + + // Empty batch on min runners + patchID = w.setDesiredWorkerState(0, 0) + assert.Equal(t, 0, patchID) // forcing the state + assert.Equal(t, 1, w.lastPatch) + assert.Equal(t, 2, w.patchSeq) + }) +} diff --git a/controllers/actions.github.com/ephemeralrunnerset_controller.go b/controllers/actions.github.com/ephemeralrunnerset_controller.go index 4c3692b8..c5d166a5 100644 --- 
a/controllers/actions.github.com/ephemeralrunnerset_controller.go +++ b/controllers/actions.github.com/ephemeralrunnerset_controller.go @@ -213,9 +213,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R // on the next batch case ephemeralRunnerSet.Spec.PatchID == 0 && total > ephemeralRunnerSet.Spec.Replicas: count := total - ephemeralRunnerSet.Spec.Replicas - if count <= 0 { - break - } log.Info("Deleting ephemeral runners (scale down)", "count", count) if err := r.deleteIdleEphemeralRunners( ctx, From 3bda9bb2402b361e3bab78fdcd2b67da1c4e3638 Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Fri, 17 May 2024 15:16:38 +0200 Subject: [PATCH 18/19] Refresh session if token expires during delete message (#3529) --- cmd/ghalistener/listener/listener.go | 19 ++++- cmd/ghalistener/listener/listener_test.go | 87 +++++++++++++++++++++++ 2 files changed, 104 insertions(+), 2 deletions(-) diff --git a/cmd/ghalistener/listener/listener.go b/cmd/ghalistener/listener/listener.go index 79009726..a9cf0838 100644 --- a/cmd/ghalistener/listener/listener.go +++ b/cmd/ghalistener/listener/listener.go @@ -295,8 +295,23 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa func (l *Listener) deleteLastMessage(ctx context.Context) error { l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID) - if err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID); err != nil { - return fmt.Errorf("failed to delete message: %w", err) + err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID) + if err == nil { // if NO error + return nil + } + + expiredError := &actions.MessageQueueTokenExpiredError{} + if !errors.As(err, &expiredError) { + return fmt.Errorf("failed to delete last message: %w", err) + } + + if err := l.refreshSession(ctx); err != nil { + return err + } + + err = l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID) + if err != nil { + return fmt.Errorf("failed to delete last message after message session refresh: %w", err) } return nil diff --git a/cmd/ghalistener/listener/listener_test.go b/cmd/ghalistener/listener/listener_test.go index d2af2b03..38c1f40f 100644 --- a/cmd/ghalistener/listener/listener_test.go +++ b/cmd/ghalistener/listener/listener_test.go @@ -377,6 +377,93 @@ func TestListener_deleteLastMessage(t *testing.T) { err = l.deleteLastMessage(ctx) assert.NotNil(t, err) }) + + t.Run("RefreshAndSucceeds", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + config := Config{ + ScaleSetID: 1, + Metrics: metrics.Discard, + } + + client := listenermocks.NewClient(t) + + newUUID := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &newUUID, + OwnerName: "example", + RunnerScaleSet: &actions.RunnerScaleSet{}, + MessageQueueUrl: "https://example.com", + MessageQueueAccessToken: "1234567890", + Statistics: nil, + } + client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() + + client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Once() + + client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.MatchedBy(func(lastMessageID any) bool { + return lastMessageID.(int64) == int64(5) + })).Return(nil).Once() + config.Client = client + + l, err := New(config) + require.Nil(t, err) + + oldUUID := uuid.New() + 
l.session = &actions.RunnerScaleSetSession{ + SessionId: &oldUUID, + RunnerScaleSet: &actions.RunnerScaleSet{}, + } + l.lastMessageID = 5 + + config.Client = client + + err = l.deleteLastMessage(ctx) + assert.NoError(t, err) + }) + + t.Run("RefreshAndFails", func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + config := Config{ + ScaleSetID: 1, + Metrics: metrics.Discard, + } + + client := listenermocks.NewClient(t) + + newUUID := uuid.New() + session := &actions.RunnerScaleSetSession{ + SessionId: &newUUID, + OwnerName: "example", + RunnerScaleSet: &actions.RunnerScaleSet{}, + MessageQueueUrl: "https://example.com", + MessageQueueAccessToken: "1234567890", + Statistics: nil, + } + client.On("RefreshMessageSession", ctx, mock.Anything, mock.Anything).Return(session, nil).Once() + + client.On("DeleteMessage", ctx, mock.Anything, mock.Anything, mock.Anything).Return(&actions.MessageQueueTokenExpiredError{}).Twice() + + config.Client = client + + l, err := New(config) + require.Nil(t, err) + + oldUUID := uuid.New() + l.session = &actions.RunnerScaleSetSession{ + SessionId: &oldUUID, + RunnerScaleSet: &actions.RunnerScaleSet{}, + } + l.lastMessageID = 5 + + config.Client = client + + err = l.deleteLastMessage(ctx) + assert.Error(t, err) + }) } func TestListener_Listen(t *testing.T) { From 3be7128f9aa19a49f3af599b67489f8415d8b03e Mon Sep 17 00:00:00 2001 From: Nikola Jokic Date: Mon, 20 May 2024 10:58:06 +0200 Subject: [PATCH 19/19] Prepare 0.9.2 release (#3530) --- .github/workflows/gha-e2e-tests.yaml | 2 +- charts/gha-runner-scale-set-controller/Chart.yaml | 4 ++-- charts/gha-runner-scale-set/Chart.yaml | 4 ++-- docs/gha-runner-scale-set-controller/README.md | 8 ++++++++ 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/workflows/gha-e2e-tests.yaml b/.github/workflows/gha-e2e-tests.yaml index 93879063..0e8b244e 100644 --- a/.github/workflows/gha-e2e-tests.yaml +++ b/.github/workflows/gha-e2e-tests.yaml @@ -16,7 +16,7 @@ env: TARGET_ORG: actions-runner-controller TARGET_REPO: arc_e2e_test_dummy IMAGE_NAME: "arc-test-image" - IMAGE_VERSION: "0.9.1" + IMAGE_VERSION: "0.9.2" concurrency: # This will make sure we only apply the concurrency limits on pull requests diff --git a/charts/gha-runner-scale-set-controller/Chart.yaml b/charts/gha-runner-scale-set-controller/Chart.yaml index e1aec2ca..d3b1ded4 100644 --- a/charts/gha-runner-scale-set-controller/Chart.yaml +++ b/charts/gha-runner-scale-set-controller/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.9.1 +version: 0.9.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.9.1" +appVersion: "0.9.2" home: https://github.com/actions/actions-runner-controller diff --git a/charts/gha-runner-scale-set/Chart.yaml b/charts/gha-runner-scale-set/Chart.yaml index 934d41c1..52514a01 100644 --- a/charts/gha-runner-scale-set/Chart.yaml +++ b/charts/gha-runner-scale-set/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. 
This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.1
+version: 0.9.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.1"
+appVersion: "0.9.2"
 
 home: https://github.com/actions/actions-runner-controller
diff --git a/docs/gha-runner-scale-set-controller/README.md b/docs/gha-runner-scale-set-controller/README.md
index f9044acf..f7a353f0 100644
--- a/docs/gha-runner-scale-set-controller/README.md
+++ b/docs/gha-runner-scale-set-controller/README.md
@@ -43,6 +43,14 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
 
 ## Changelog
 
+### v0.9.2
+
+1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)
+1. Re-use the last desired patch on empty batch [#3453](https://github.com/actions/actions-runner-controller/pull/3453)
+1. Extract single place to set up indexers [#3454](https://github.com/actions/actions-runner-controller/pull/3454)
+1. Include controller version in logs [#3473](https://github.com/actions/actions-runner-controller/pull/3473)
+1. Propagate arbitrary labels from runnersets to all created resources [#3157](https://github.com/actions/actions-runner-controller/pull/3157)
+
 ### v0.9.1
 
 #### Major changes