From 50038fba61cd96d039e9043586c005ec8393c7be Mon Sep 17 00:00:00 2001
From: Nikola Jokic
Date: Tue, 16 Dec 2025 13:26:44 +0100
Subject: [PATCH] Re-schedule if the failed reason starts with `OutOf` (#4336)

---
 .../ephemeralrunner_controller.go      | 110 +++++++++++-------
 .../ephemeralrunner_controller_test.go |  46 ++++++++
 2 files changed, 113 insertions(+), 43 deletions(-)

diff --git a/controllers/actions.github.com/ephemeralrunner_controller.go b/controllers/actions.github.com/ephemeralrunner_controller.go
index 80a012be..40d25ad3 100644
--- a/controllers/actions.github.com/ephemeralrunner_controller.go
+++ b/controllers/actions.github.com/ephemeralrunner_controller.go
@@ -312,26 +312,45 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	cs := runnerContainerStatus(pod)
 	switch {
-	case cs == nil:
-		// starting, no container state yet
-		log.Info("Waiting for runner container status to be available")
-		return ctrl.Result{}, nil
-	case cs.State.Terminated == nil: // still running or evicted
-		if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" {
-			log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod",
-				"PodPhase", pod.Status.Phase,
-				"PodReason", pod.Status.Reason,
-				"PodMessage", pod.Status.Message,
+	case pod.Status.Phase == corev1.PodFailed: // All containers are stopped
+		switch {
+		case pod.Status.Reason == "Evicted":
+			log.Info("Pod evicted; Deleting ephemeral runner or pod",
+				"podPhase", pod.Status.Phase,
+				"podReason", pod.Status.Reason,
+				"podMessage", pod.Status.Message,
 			)
-			if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
-				log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed")
+			return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
+
+		case strings.HasPrefix(pod.Status.Reason, "OutOf"): // most likely a transient issue.
+			log.Info("Pod failed with reason starting with OutOf. Deleting ephemeral runner or pod",
+				"podPhase", pod.Status.Phase,
+				"podReason", pod.Status.Reason,
+				"podMessage", pod.Status.Message,
+			)
+			return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
+
+		default:
+			log.Info("Pod is in failed phase; updating ephemeral runner status",
+				"podPhase", pod.Status.Phase,
+				"podReason", pod.Status.Reason,
+				"podMessage", pod.Status.Message,
+			)
+			if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
+				log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
 				return ctrl.Result{}, err
 			}
 
 			return ctrl.Result{}, nil
 		}
 
-		log.Info("Ephemeral runner container is still running")
+	case cs == nil:
+		// starting, no container state yet
+		log.Info("Waiting for runner container status to be available")
+		return ctrl.Result{}, nil
+
+	case cs.State.Terminated == nil: // container is not terminated and pod phase is not failed, so runner is still running
+		log.Info("Runner container is still running; updating ephemeral runner status")
 		if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
 			log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
 			return ctrl.Result{}, err
 		}
@@ -340,36 +359,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	case cs.State.Terminated.ExitCode != 0: // failed
 		log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
-		if ephemeralRunner.HasJob() {
-			log.Error(
-				errors.New("ephemeral runner has a job assigned, but the pod has failed"),
-				"Ephemeral runner either has faulty entrypoint or something external killing the runner",
-			)
-			log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
-			if err := r.Delete(ctx, ephemeralRunner); err != nil {
-				log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
-				return ctrl.Result{}, err
-			}
-
-			log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
-			log.Info("Trying to remove the runner from the service")
-			actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
-			if err != nil {
-				log.Error(err, "Failed to get actions client for removing the runner from the service")
-				return ctrl.Result{}, nil
-			}
-			if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
-				log.Error(err, "Failed to remove the runner from the service")
-				return ctrl.Result{}, nil
-			}
-			log.Info("Removed the runner from the service")
-			return ctrl.Result{}, nil
-		}
-		if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
-			log.Error(err, "Failed to delete runner pod on failure")
-			return ctrl.Result{}, err
-		}
-		return ctrl.Result{}, nil
+		return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
 
 	default: // succeeded
 		log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner",
 			"exitCode", cs.State.Terminated.ExitCode)
@@ -381,6 +371,40 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	}
 }
 
+func (r *EphemeralRunnerReconciler) deleteEphemeralRunnerOrPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
+	if ephemeralRunner.HasJob() {
+		log.Error(
+			errors.New("ephemeral runner has a job assigned, but the pod has failed"),
+			"Ephemeral runner either has faulty entrypoint or something external killing the runner",
+		)
+		log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
+		if err := r.Delete(ctx, ephemeralRunner); err != nil {
+			log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
+			return err
+		}
+
+		log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
+		log.Info("Trying to remove the runner from the service")
+		actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
+		if err != nil {
+			log.Error(err, "Failed to get actions client for removing the runner from the service")
+			return nil
+		}
+		if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
+			log.Error(err, "Failed to remove the runner from the service")
+			return nil
+		}
+		log.Info("Removed the runner from the service")
+		return nil
+	}
+	if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
+		log.Error(err, "Failed to delete runner pod on failure")
+		return err
+	}
+
+	return nil
+}
+
 func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ok bool, err error) {
 	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
 		actionsError := &actions.ActionsError{}
diff --git a/controllers/actions.github.com/ephemeralrunner_controller_test.go b/controllers/actions.github.com/ephemeralrunner_controller_test.go
index a19f272f..129dcd17 100644
--- a/controllers/actions.github.com/ephemeralrunner_controller_test.go
+++ b/controllers/actions.github.com/ephemeralrunner_controller_test.go
@@ -745,6 +745,52 @@ var _ = Describe("EphemeralRunner", func() {
 			).Should(BeEquivalentTo(true))
 		})
 
+		It("It should re-create pod on reason starting with OutOf", func() {
+			pod := new(corev1.Pod)
+			Eventually(
+				func() (bool, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
+					if err != nil {
+						return false, err
+					}
+					return true, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(true))
+
+			pod.Status.Phase = corev1.PodFailed
+			pod.Status.Reason = "OutOfpods"
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
+				Name:  v1alpha1.EphemeralRunnerContainerName,
+				State: corev1.ContainerState{},
+			})
+			err := k8sClient.Status().Update(ctx, pod)
+			Expect(err).To(BeNil(), "failed to patch pod status")
+
+			updated := new(v1alpha1.EphemeralRunner)
+			Eventually(func() (bool, error) {
+				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
+				if err != nil {
+					return false, err
+				}
+				return len(updated.Status.Failures) == 1, nil
+			}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
+
+			// should re-create after failure
+			Eventually(
+				func() (bool, error) {
+					pod := new(corev1.Pod)
+					if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
+						return false, err
+					}
+					return true, nil
+				},
+				ephemeralRunnerTimeout,
+				ephemeralRunnerInterval,
+			).Should(BeEquivalentTo(true))
+		})
+
 		It("It should not set the phase to succeeded without pod termination status", func() {
 			pod := new(corev1.Pod)
 			Eventually(
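
Editor's note, not part of the patch above: a minimal, self-contained Go sketch of the classification the new `case pod.Status.Phase == corev1.PodFailed` branch performs. The helper name `classifyFailedPodReason` and the sample reason `DeadlineExceeded` are hypothetical, used only for illustration; in the controller itself the two transient branches call `deleteEphemeralRunnerOrPod` (so the runner pod can be re-created, as the new test asserts), while any other reason updates the ephemeral runner status from the pod.

package main

import (
	"fmt"
	"strings"
)

// classifyFailedPodReason mirrors the decision the patched switch makes for a pod
// in the Failed phase: "Evicted" and reasons starting with "OutOf" (for example
// "OutOfpods") are treated as transient, so the ephemeral runner or its pod is
// deleted and re-created; every other reason falls through to a status update.
func classifyFailedPodReason(reason string) string {
	switch {
	case reason == "Evicted":
		return "delete ephemeral runner or pod (re-create)"
	case strings.HasPrefix(reason, "OutOf"):
		return "delete ephemeral runner or pod (re-create)"
	default:
		return "update ephemeral runner status from the pod"
	}
}

func main() {
	// "OutOfpods" is the reason used in the new controller test; "DeadlineExceeded"
	// is a hypothetical example of a reason that matches neither transient branch.
	for _, reason := range []string{"Evicted", "OutOfpods", "DeadlineExceeded"} {
		fmt.Printf("%-16s -> %s\n", reason, classifyFailedPodReason(reason))
	}
}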