Add golangci-lint to CI (#1794)

This introduces a linter that runs on PRs to help with code review and code hygiene. I've also gone ahead and fixed (or explicitly ignored) the existing lint findings.

I've only set up the default linters for now. Many more linters and options are documented at https://golangci-lint.run/.

The GitHub Action should add the appropriate annotations to the lint job on each PR. Contributors can also lint locally using `make lint`.
Cory Miller 2022-09-20 20:08:22 -04:00 committed by GitHub
parent 718232f8f4
commit c91e76f169
17 changed files with 63 additions and 87 deletions

.github/workflows/golangci-lint.yaml (new file)

@@ -0,0 +1,23 @@
name: golangci-lint
on:
  push:
    branches:
      - master
  pull_request:
permissions:
  contents: read
  pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.19
      - uses: actions/checkout@v3
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          only-new-issues: true
          version: v1.49.0

.golangci.yaml (new file)

@@ -0,0 +1,17 @@
run:
  timeout: 3m
output:
  format: github-actions
linters-settings:
  errcheck:
    exclude-functions:
      - (net/http.ResponseWriter).Write
      - (*net/http.Server).Shutdown
      - (*github.com/actions-runner-controller/actions-runner-controller/simulator.VisibleRunnerGroups).Add
      - (*github.com/actions-runner-controller/actions-runner-controller/testing.Kind).Stop
issues:
  exclude-rules:
    - path: controllers/suite_test.go
      linters:
        - staticcheck
      text: "SA1019"


@@ -155,3 +155,7 @@ GINKGO_FOCUS='[It] should create a new Runner resource from the specified templa
#### Helm Version Bumps
In general we ask you not to bump the version in your PR, the maintainers in general manage the publishing of a new chart.
+
+#### Running linters locally
+To run the `golangci-lint` tool locally, we recommend you use `make lint` to run the tool using a Docker container matching the CI version.


@@ -51,6 +51,9 @@ endif
all: manager

+lint:
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.49.0 golangci-lint run
+
GO_TEST_ARGS ?= -short

# Run tests


@@ -334,11 +334,7 @@ func (r Runner) IsRegisterable() bool {
	}
	now := metav1.Now()
-	if r.Status.Registration.ExpiresAt.Before(&now) {
-		return false
-	}
-	return true
+	return !r.Status.Registration.ExpiresAt.Before(&now)
}

// +kubebuilder:object:root=true


@@ -79,7 +79,6 @@ func (s *batchScaler) Add(st *ScaleTarget) {
		for {
			select {
			case <-after:
-				after = nil
				break batch
			case st := <-s.queue:
				nsName := types.NamespacedName{


@@ -20,7 +20,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
-	"io/ioutil"
+	"io"
	"net/http"
	"strings"
	"sync"

@@ -75,10 +75,8 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
	// A scale target is enqueued on each retrieval of each eligible webhook event, so that it is processed asynchronously.
	QueueLimit int
	worker     *worker
	workerInit sync.Once
-	workerStart sync.Once
-	batchCh     chan *ScaleTarget
}

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {

@@ -133,7 +131,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
			return
		}
	} else {
-		payload, err = ioutil.ReadAll(r.Body)
+		payload, err = io.ReadAll(r.Body)
		if err != nil {
			autoscaler.Log.Error(err, "error reading request body")
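
For background on the `ioutil` changes in this and the following test file: the `io/ioutil` helpers have been deprecated since Go 1.16 in favour of equivalents in `io` and `os`, which shows up via staticcheck's deprecation check (SA1019). A small, self-contained sketch of the replacement, using hypothetical data rather than code from this repo:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Was: ioutil.NopCloser / ioutil.ReadAll.
	body := io.NopCloser(strings.NewReader(`{"action":"queued"}`))
	defer body.Close()

	payload, err := io.ReadAll(body) // reads until EOF, as ioutil.ReadAll did
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```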


@@ -5,7 +5,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
-	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/url"

@@ -504,7 +503,7 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w
	hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}
-	client := fake.NewFakeClientWithScheme(sc, initObjs...)
+	client := fake.NewClientBuilder().WithScheme(sc).WithRuntimeObjects(initObjs...).Build()
	logs := installTestLogger(hraWebhook)

@@ -537,7 +536,7 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w
		t.Error("status:", resp.StatusCode)
	}
-	respBody, err := ioutil.ReadAll(resp.Body)
+	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}

@@ -575,7 +574,7 @@ func sendWebhook(server *httptest.Server, eventType string, event interface{}) (
		"X-GitHub-Event": {eventType},
		"Content-Type": {"application/json"},
		},
-		Body: ioutil.NopCloser(bytes.NewBuffer(reqBody)),
+		Body: io.NopCloser(bytes.NewBuffer(reqBody)),
	}
	return http.DefaultClient.Do(req)

@@ -607,7 +606,7 @@ func (l *testLogSink) Info(_ int, msg string, kvs ...interface{}) {
	fmt.Fprintf(l.writer, "\n")
}

-func (_ *testLogSink) Enabled(level int) bool {
+func (*testLogSink) Enabled(level int) bool {
	return true
}


@@ -10,12 +10,6 @@ const (
	rsNamespace = "namespace"
)

-var (
-	runnerSetMetrics = []prometheus.Collector{
-		runnerSetReplicas,
-	}
-)
-
var (
	runnerSetReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{


@@ -151,14 +151,6 @@ func (c *MultiGitHubClient) DeinitForRunnerSet(rs *v1alpha1.RunnerSet) {
	c.derefClient(rs.Namespace, secretName, refFromRunnerSet(rs))
}

-func (c *MultiGitHubClient) deinitClientForRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) {
-	c.derefClient(rs.Namespace, rs.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerReplicaSet(rs))
-}
-
-func (c *MultiGitHubClient) deinitClientForRunnerDeployment(rd *v1alpha1.RunnerDeployment) {
-	c.derefClient(rd.Namespace, rd.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerDeployment(rd))
-}
-
func (c *MultiGitHubClient) DeinitForHRA(hra *v1alpha1.HorizontalRunnerAutoscaler) {
	var secretName string
	if hra.Spec.GitHubAPICredentialsFrom != nil {

@@ -310,22 +302,6 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err
	return &conf, nil
}

-func refFromRunnerDeployment(rd *v1alpha1.RunnerDeployment) *runnerOwnerRef {
-	return &runnerOwnerRef{
-		kind: rd.Kind,
-		ns:   rd.Namespace,
-		name: rd.Name,
-	}
-}
-
-func refFromRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) *runnerOwnerRef {
-	return &runnerOwnerRef{
-		kind: rs.Kind,
-		ns:   rs.Namespace,
-		name: rs.Name,
-	}
-}
-
func refFromRunner(r *v1alpha1.Runner) *runnerOwnerRef {
	return &runnerOwnerRef{
		kind: r.Kind,


@@ -715,7 +715,7 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
				},
			},
		},
-		corev1.EnvVar{
+		{
			Name:  "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
			Value: strconv.FormatBool(isRequireSameNode),
		},

@@ -834,7 +834,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
	if dockerdContainer != nil {
		template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
	}
-	if runnerContainerIndex < runnerContainerIndex {
+	if dockerdContainerIndex < runnerContainerIndex {
		runnerContainerIndex--
	}
	dockerdContainer = nil

@@ -1226,13 +1226,3 @@ func isRequireSameNode(pod *corev1.Pod) (bool, error) {
	}
	return false, nil
}

-func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
-	for i := range runner.Spec.Env {
-		if runner.Spec.Env[i].Name == key {
-			runner.Spec.Env[i].Value = value
-			return
-		}
-	}
-	runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
-}


@@ -315,7 +315,7 @@ func syncRunnerPodsOwners(ctx context.Context, c client.Client, log logr.Logger,
	numOwners := len(owners)

	var hashes []string
-	for h, _ := range state.podsForOwners {
+	for h := range state.podsForOwners {
		hashes = append(hashes, h)
	}


@@ -47,7 +47,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	status := req.URL.Query().Get("status")
	if h.Statuses != nil {
		if body, ok := h.Statuses[status]; ok {
-			fmt.Fprintf(w, body)
+			fmt.Fprint(w, body)
			return
		}
	}

@@ -69,7 +69,7 @@ func (h *MapHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(404)
	} else {
		w.WriteHeader(h.Status)
-		fmt.Fprintf(w, body)
+		fmt.Fprint(w, body)
	}
}
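
The two `fmt.Fprintf` to `fmt.Fprint` changes above fix a classic lint finding: when the first argument is dynamic data rather than a constant format string, any `%` in that data is interpreted as a formatting verb. A hypothetical illustration of the difference (not code from this repo):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	body := "coverage: 100%" // response body that happens to contain a '%'

	// Fprintf treats body as a format string, so the trailing '%' is
	// parsed as the start of a verb and the output comes out mangled
	// (roughly "coverage: 100%!(NOVERB)").
	fmt.Fprintf(os.Stdout, body)
	fmt.Fprintln(os.Stdout)

	// Fprint writes the string verbatim.
	fmt.Fprint(os.Stdout, body)
	fmt.Fprintln(os.Stdout)
}
```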


@@ -41,18 +41,3 @@ func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org,
	return visible, nil
}

-func (c *Simulator) hasRepoAccessToOrganizationRunnerGroup(ctx context.Context, org string, runnerGroupId int64, repo string) (bool, error) {
-	repos, err := c.Client.ListRunnerGroupRepositoryAccesses(ctx, org, runnerGroupId)
-	if err != nil {
-		return false, err
-	}
-
-	for _, githubRepo := range repos {
-		if githubRepo.GetFullName() == repo {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}


@@ -56,8 +56,3 @@ func delete(cmName string) ([]byte, error) {
	cmd := exec.Command("kubectl", "delete", "cm", cmName)
	return cmd.CombinedOutput()
}

-func deleteControllerManagerSecret() ([]byte, error) {
-	cmd := exec.Command("kubectl", "-n", "actions-runner-system", "delete", "secret", "controller-manager")
-	return cmd.CombinedOutput()
-}


@@ -616,9 +616,6 @@ func (e *env) checkGitHubToken(t *testing.T, tok string) error {
	return nil
}

-func (e *env) f() {
-}
-
func (e *env) buildAndLoadImages(t *testing.T) {
	t.Helper()

@@ -1070,7 +1067,7 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, ti
	var expected []string
-	for _ = range testJobs {
+	for range testJobs {
		expected = append(expected, "ok")
	}


@@ -124,7 +124,7 @@ func (k *Kubectl) kubectlCmd(ctx context.Context, c string, args []string, cfg K
	}

	if cfg.Timeout > 0 {
-		args = append(args, "--timeout="+fmt.Sprintf("%s", cfg.Timeout))
+		args = append(args, fmt.Sprintf("--timeout=%v", cfg.Timeout.String()))
	}

	cmd := exec.CommandContext(ctx, "kubectl", args...)