cleanup some unused code and minor refactors (#1274)

Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
Felipe Galindo Sanchez 2022-05-16 02:38:32 -07:00 committed by GitHub
parent bf45aa9f6b
commit 78c01fd31d
5 changed files with 6 additions and 16 deletions


@@ -47,8 +47,6 @@ const (
// A pod that has timed out can be terminated if needed.
registrationTimeout = 10 * time.Minute
defaultRegistrationCheckInterval = time.Minute
// DefaultRunnerPodRecreationDelayAfterWebhookScale is the delay until syncing the runners with the desired replicas
// after a webhook-based scale up.
// This is used to prevent ARC from recreating completed runner pods that would otherwise be deleted soon after without ever being used.


@@ -15,10 +15,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event
push := g.Push
-if push == nil {
-return false
-}
-return true
+return push != nil
}
}
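
The hunk above replaces an explicit nil check followed by return true/false with a direct boolean return. A minimal, self-contained sketch of the same pattern, using placeholder types rather than ARC's actual webhook event types:

```go
package main

import "fmt"

// PushEvent and event are stand-ins for the real webhook types.
type PushEvent struct{}

type event struct {
	Push *PushEvent
}

// Before: explicit branch on the nil check.
func matchPushVerbose(g event) bool {
	push := g.Push
	if push == nil {
		return false
	}
	return true
}

// After: return the comparison directly.
func matchPush(g event) bool {
	return g.Push != nil
}

func main() {
	fmt.Println(matchPushVerbose(event{}), matchPush(event{}))                                     // false false
	fmt.Println(matchPushVerbose(event{Push: &PushEvent{}}), matchPush(event{Push: &PushEvent{}})) // true true
}
```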


@@ -134,7 +134,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
"lastState.message", lts.Message,
"pod.phase", pod.Status.Phase,
)
-} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, runner, *runnerID); err != nil {
+} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, *runnerID); err != nil {
if errors.Is(err, &gogithub.RateLimitError{}) {
// We log the underlying error when the GitHub API call to list or unregister runners failed,
// or when the runner is still busy.
@@ -193,7 +193,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
// So we can just wait for the completion without actively retrying unregistration.
ephemeral := getRunnerEnv(pod, EnvVarEphemeral)
if ephemeral == "true" {
-pod, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
+_, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
if err != nil {
return &ctrl.Result{}, err
}
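
The hunk above now discards the pod returned by annotatePodOnce with the blank identifier, since the value is not used afterwards. For context, a hypothetical sketch of what such an idempotent annotation helper could look like with a controller-runtime client; this is an illustration under assumed names, not ARC's actual annotatePodOnce:

```go
import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// annotateOnce sets the annotation only if it is not already present, so
// repeated reconciles keep the timestamp recorded by the first call.
func annotateOnce(ctx context.Context, c client.Client, pod *corev1.Pod, key, value string) (*corev1.Pod, error) {
	if _, ok := pod.Annotations[key]; ok {
		return pod, nil
	}
	updated := pod.DeepCopy()
	if updated.Annotations == nil {
		updated.Annotations = map[string]string{}
	}
	updated.Annotations[key] = value
	// Patch only the annotation diff instead of updating the whole object.
	if err := c.Patch(ctx, updated, client.MergeFrom(pod)); err != nil {
		return nil, err
	}
	return updated, nil
}
```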
@@ -370,7 +370,7 @@ func setRunnerEnv(pod *corev1.Pod, key, value string) {
// There isn't a single right grace period that works for everyone.
// The longer the grace period is, the earlier a cluster resource shortage can occur due to throttled runner pod deletions,
// while the shorter the grace period is, the more likely you are to encounter the race issue.
-func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo, name string, id int64) (bool, error) {
+func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo string, id int64) (bool, error) {
// For the record, historically ARC did not try to call RemoveRunner on a busy runner, but that is no longer the case.
// The reason ARC did so was to keep a runner that was running a job from stopping prematurely.
//
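
The dropped name parameter was unused: the GitHub REST API removes a self-hosted runner by its numeric ID alone. A minimal sketch of a repository-level removal using go-github; the helper name and the module version in the import path are assumptions, and this is not ARC's wrapped client:

```go
package main

import (
	"context"
	"fmt"

	gogithub "github.com/google/go-github/v45/github"
)

// removeRunnerByID deletes a repository-level self-hosted runner.
// Only the numeric runner ID is needed; the runner's name plays no part in the call.
func removeRunnerByID(ctx context.Context, client *gogithub.Client, owner, repo string, id int64) error {
	_, err := client.Actions.RemoveRunner(ctx, owner, repo, id)
	return err
}

func main() {
	// An unauthenticated client for illustration; real use requires a token.
	client := gogithub.NewClient(nil)
	if err := removeRunnerByID(context.Background(), client, "example-org", "example-repo", 42); err != nil {
		fmt.Println("unregister failed:", err)
	}
}
```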


@@ -421,9 +421,7 @@ func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
newRSTemplate := *rd.Spec.Template.DeepCopy()
-for _, l := range commonRunnerLabels {
-newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
-}
+newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, commonRunnerLabels...)
templateHash := ComputeHash(&newRSTemplate)
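
This hunk, and the RunnerSet hunk that follows, replace a per-element loop with a single variadic append. A quick sketch of the idiom with placeholder label slices:

```go
package main

import "fmt"

func main() {
	labels := []string{"self-hosted", "linux"}
	common := []string{"team-a", "gpu"}

	// Element-by-element loop.
	withLoop := append([]string{}, labels...)
	for _, l := range common {
		withLoop = append(withLoop, l)
	}

	// Variadic append does the same in one call.
	withSpread := append(append([]string{}, labels...), common...)

	fmt.Println(withLoop)   // [self-hosted linux team-a gpu]
	fmt.Println(withSpread) // [self-hosted linux team-a gpu]
}
```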


@@ -188,9 +188,7 @@ var LabelValuePodMutation = "true"
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
-for _, l := range r.CommonRunnerLabels {
-runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, l)
-}
+runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
template := corev1.Pod{
ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,