Fix various golangci-lint errors (#2147)
These were introduced via the controller-runtime upgrade and via the removal of the legacy pull-based scale triggers (#2001).
This commit is contained in:

parent a6c4d84234
commit bc4f4fee12
@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"strings"
 	"sync"
 	"time"

@@ -333,24 +332,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx con
 	return hras, nil
 }

-func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool {
-	if len(types) == 0 {
-		return true
-	}
-
-	if eventAction == nil {
-		return false
-	}
-
-	for _, tpe := range types {
-		if tpe == *eventAction {
-			return true
-		}
-	}
-
-	return false
-}
-
 type ScaleTarget struct {
 	v1alpha1.HorizontalRunnerAutoscaler
 	v1alpha1.ScaleUpTrigger
@@ -358,72 +339,6 @@ type ScaleTarget struct {
 	log *logr.Logger
 }

-func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
-	var matched []ScaleTarget
-
-	for _, hra := range hras {
-		if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
-			continue
-		}
-
-		for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers {
-			if !f(scaleUpTrigger) {
-				continue
-			}
-
-			matched = append(matched, ScaleTarget{
-				HorizontalRunnerAutoscaler: hra,
-				ScaleUpTrigger:             scaleUpTrigger,
-			})
-		}
-	}
-
-	return matched
-}
-
-func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
-	hras, err := autoscaler.findHRAsByKey(ctx, name)
-	if err != nil {
-		return nil, err
-	}
-
-	autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
-
-	targets := autoscaler.searchScaleTargets(hras, f)
-
-	n := len(targets)
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n > 1 {
-		var scaleTargetIDs []string
-
-		for _, t := range targets {
-			scaleTargetIDs = append(scaleTargetIDs, t.HorizontalRunnerAutoscaler.Name)
-		}
-
-		autoscaler.Log.Info(
-			"Found too many scale targets: "+
-				"It must be exactly one to avoid ambiguity. "+
-				"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
-				"or update Repository, Organization, or Enterprise fields in your RunnerDeployment resources to fix the ambiguity.",
-			"scaleTargets", strings.Join(scaleTargetIDs, ","))
-
-		return nil, nil
-	}
-
-	return &targets[0], nil
-}
-
-func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, log logr.Logger, repo, owner, ownerType, enterprise string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
-	scaleTarget := func(value string) (*ScaleTarget, error) {
-		return autoscaler.getScaleTarget(ctx, value, f)
-	}
-	return autoscaler.getScaleUpTargetWithFunction(ctx, log, repo, owner, ownerType, enterprise, scaleTarget)
-}
-
 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleUpTargetForRepoOrOrg(
 	ctx context.Context, log logr.Logger, repo, owner, ownerType, enterprise string, labels []string,
 ) (*ScaleTarget, error) {
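
The helpers removed in the two hunks above (matchTriggerConditionAgainstEvent, searchScaleTargets, getScaleTarget, and getScaleUpTarget) appear to have lost their remaining callers with the removal of the legacy pull-based scale triggers in #2001, and deleting getScaleTarget also drops the only strings.Join call, which is why the "strings" import disappears in the first hunk. Leftover unexported code like this is what golangci-lint's unused check reports. A purely illustrative sketch of that class of finding follows; none of the names below come from the repository:

    package example

    // matchAny compiles, but nothing in the package calls it, so golangci-lint's
    // `unused` check reports it. The fix is simply to delete it, which is what
    // this commit does for the webhook autoscaler helpers above.
    func matchAny(types []string, action string) bool {
    	for _, t := range types {
    		if t == action {
    			return true
    		}
    	}
    	return false
    }

    // ContainsQueued is exported; by default `unused` treats exported identifiers
    // in a library package as used, so it is not reported even without callers.
    func ContainsQueued(statuses []string) bool {
    	for _, s := range statuses {
    		if s == "queued" {
    			return true
    		}
    	}
    	return false
    }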
@@ -40,9 +40,6 @@ var (
 	workflowRunsFor3Replicas             = `{"total_count": 5, "workflow_runs":[{"status":"queued"}, {"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`
 	workflowRunsFor3Replicas_queued      = `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"queued"}]}"`
 	workflowRunsFor3Replicas_in_progress = `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`
-	workflowRunsFor1Replicas             = `{"total_count": 6, "workflow_runs":[{"status":"queued"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}]}"`
-	workflowRunsFor1Replicas_queued      = `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`
-	workflowRunsFor1Replicas_in_progress = `{"total_count": 0, "workflow_runs":[]}"`
 )

 // SetupIntegrationTest will set up a testing environment.
@@ -57,19 +57,16 @@ func TestAPIs(t *testing.T) {
 var _ = BeforeSuite(func(done Done) {
 	logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))

-	var apiServerFlags []string
-
-	apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...)
-	// Avoids the following error:
-	// 2021-03-19T15:14:11.673+0900    ERROR   controller-runtime.controller   Reconciler error      {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
-	apiServerFlags = append(apiServerFlags, "--allow-privileged=true")
-
 	By("bootstrapping test environment")
 	testEnv = &envtest.Environment{
-		CRDDirectoryPaths:  []string{filepath.Join("../..", "config", "crd", "bases")},
-		KubeAPIServerFlags: apiServerFlags,
+		CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
 	}

+	// Avoids the following error:
+	// 2021-03-19T15:14:11.673+0900    ERROR   controller-runtime.controller   Reconciler error      {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
+	testEnv.ControlPlane.GetAPIServer().Configure().
+		Append("allow-privileged", "true")
+
 	var err error
 	cfg, err = testEnv.Start()
 	Expect(err).ToNot(HaveOccurred())
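
The last hunk covers the controller-runtime upgrade side: the suite stops passing kube-apiserver flags through the Environment's KubeAPIServerFlags field (and stops using envtest.DefaultKubeAPIServerFlags), and instead appends the extra flag through the control plane's API server arguments. Below is a minimal sketch of that new-style bootstrap, assuming controller-runtime's envtest package; the helper name and the testing.T-based wiring are illustrative, while the repository's suite keeps using Ginkgo's BeforeSuite as shown above:

    package controllers_test

    import (
    	"path/filepath"
    	"testing"

    	"k8s.io/client-go/rest"
    	"sigs.k8s.io/controller-runtime/pkg/envtest"
    )

    // startTestEnv boots an envtest control plane the same way the hunk above
    // does, just wrapped for a plain *testing.T instead of Ginkgo.
    func startTestEnv(t *testing.T) (*rest.Config, *envtest.Environment) {
    	t.Helper()

    	testEnv := &envtest.Environment{
    		CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
    	}

    	// Extra kube-apiserver flags are appended via the control plane's API
    	// server arguments rather than set as a flag slice on the Environment.
    	testEnv.ControlPlane.GetAPIServer().Configure().
    		Append("allow-privileged", "true")

    	cfg, err := testEnv.Start()
    	if err != nil {
    		t.Fatalf("starting envtest control plane: %v", err)
    	}
    	t.Cleanup(func() {
    		if err := testEnv.Stop(); err != nil {
    			t.Errorf("stopping envtest control plane: %v", err)
    		}
    	})

    	return cfg, testEnv
    }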