chore: goinstrument
parent a340bfa790
commit 7bcc5c3448
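This change mechanically wraps each function touched below in an OpenTelemetry span: the body starts a child span named after the function from the shared "arc" tracer, shadows ctx with the span-carrying context so downstream calls attach to it, and ends the span on return. A minimal sketch of the pattern, not part of the diff (doWork and step are hypothetical names):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

// doWork mirrors the instrumentation applied throughout this commit: start a
// span named after the function, rebind ctx to the span-carrying context,
// and end the span when the function returns.
func doWork(ctx context.Context) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "doWork")
	defer span.End()

	return step(ctx) // callees see this span as their parent
}

func step(context.Context) error { return nil }

func main() {
	// With no trace provider configured, otel's global tracer is a no-op,
	// so the added spans are inert until an exporter is wired up.
	_ = doWork(context.Background())
}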
@@ -11,6 +11,7 @@ import (
 	"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	"golang.org/x/sync/errgroup"
 )

@@ -105,6 +106,9 @@ func New(config config.Config) (*App, error) {
 }
 
 func (app *App) Run(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "App.Run")
+	defer span.End()
+
 	var errs []error
 	if app.worker == nil {
 		errs = append(errs, fmt.Errorf("worker not initialized"))
@@ -7,6 +7,7 @@ import (
 	listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
 	mock "github.com/stretchr/testify/mock"
+	"go.opentelemetry.io/otel"
 )
 
 // Listener is an autogenerated mock type for the Listener type

@@ -16,6 +17,9 @@ type Listener struct {
 // Listen provides a mock function with given fields: ctx, handler
 func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
+	defer span.End()
+
 	ret := _m.Called(ctx, handler)
 
 	var r0 error
@@ -4,6 +4,7 @@ package mocks
 
 import (
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	context "context"

@@ -17,6 +18,9 @@ type Worker struct {
 // HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
 func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
+	defer span.End()
+
 	ret := _m.Called(ctx, count, acquireCount)
 
 	var r0 int

@@ -41,6 +45,9 @@ func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acqui
 // HandleJobStarted provides a mock function with given fields: ctx, jobInfo
 func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
+	defer span.End()
+
 	ret := _m.Called(ctx, jobInfo)
 
 	var r0 error
@@ -13,6 +13,7 @@ import (
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	"github.com/google/uuid"
+	"go.opentelemetry.io/otel"
 )
 
 const (

@@ -125,6 +126,9 @@ type Handler interface {
 // The handler is responsible for handling the initial message and subsequent messages.
 // If an error occurs during any step, Listen returns an error.
 func (l *Listener) Listen(ctx context.Context, handler Handler) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
+	defer span.End()
+
 	if err := l.createSession(ctx); err != nil {
 		return fmt.Errorf("createSession failed: %w", err)
 	}

@@ -182,6 +186,9 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
 }
 
 func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.handleMessage")
+	defer span.End()
+
 	parsedMsg, err := l.parseMessage(ctx, msg)
 	if err != nil {
 		return fmt.Errorf("failed to parse message: %w", err)

@@ -223,6 +230,9 @@ func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *acti
 }
 
 func (l *Listener) createSession(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.createSession")
+	defer span.End()
+
 	var session *actions.RunnerScaleSetSession
 	var retries int

@@ -268,6 +278,9 @@ func (l *Listener) createSession(ctx context.Context) error {
 }
 
 func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.getMessage")
+	defer span.End()
+
 	l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
 	msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
 	if err == nil { // if NO error

@@ -294,6 +307,9 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
 }
 
 func (l *Listener) deleteLastMessage(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.deleteLastMessage")
+	defer span.End()
+
 	l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
 	err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
 	if err == nil { // if NO error

@@ -325,6 +341,9 @@ type parsedMessage struct {
 }
 
 func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.parseMessage")
+	defer span.End()
+
 	if msg.MessageType != "RunnerScaleSetJobMessages" {
 		l.logger.Info("Skipping message", "messageType", msg.MessageType)
 		return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)

@@ -398,6 +417,9 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
 }
 
 func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.acquireAvailableJobs")
+	defer span.End()
+
 	ids := make([]int64, 0, len(jobsAvailable))
 	for _, job := range jobsAvailable {
 		ids = append(ids, job.RunnerRequestId)

@@ -428,6 +450,9 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*ac
 }
 
 func (l *Listener) refreshSession(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Listener.refreshSession")
+	defer span.End()
+
 	l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
 	session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
 	if err != nil {
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"

@@ -19,6 +20,9 @@ type Client struct {
 // AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
 func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.AcquireJobs")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
 
 	var r0 []int64

@@ -45,6 +49,9 @@ func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, message
 // CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
 func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.CreateMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, owner)
 
 	var r0 *actions.RunnerScaleSetSession

@@ -71,6 +78,9 @@ func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int
 // DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
 func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
 
 	var r0 error

@@ -85,6 +95,9 @@ func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, mes
 // DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, sessionId)
 
 	var r0 error

@@ -99,6 +112,9 @@ func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int
 // GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
 func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetAcquirableJobs")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId)
 
 	var r0 *actions.AcquirableJobList

@@ -125,6 +141,9 @@ func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (
 // GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
 func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
 
 	var r0 *actions.RunnerScaleSetMessage

@@ -151,6 +170,9 @@ func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messag
 // RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Client.RefreshMessageSession")
+	defer span.End()
+
 	ret := _m.Called(ctx, runnerScaleSetId, sessionId)
 
 	var r0 *actions.RunnerScaleSetSession
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )

@@ -17,6 +18,9 @@ type Handler struct {
 // HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
 func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleDesiredRunnerCount")
+	defer span.End()
+
 	ret := _m.Called(ctx, count, jobsCompleted)
 
 	var r0 int

@@ -41,6 +45,9 @@ func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobs
 // HandleJobStarted provides a mock function with given fields: ctx, jobInfo
 func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleJobStarted")
+	defer span.End()
+
 	ret := _m.Called(ctx, jobInfo)
 
 	var r0 error
@@ -10,6 +10,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.opentelemetry.io/otel"
 )
 
 const (

@@ -336,6 +337,9 @@ func NewExporter(config ExporterConfig) ServerPublisher {
 }
 
 func (e *exporter) ListenAndServe(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "exporter.ListenAndServe")
+	defer span.End()
+
 	e.logger.Info("starting metrics server", "addr", e.srv.Addr)
 	go func() {
 		<-ctx.Done()
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )

@@ -17,6 +18,9 @@ type ServerPublisher struct {
 // ListenAndServe provides a mock function with given fields: ctx
 func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "ServerPublisher.ListenAndServe")
+	defer span.End()
+
 	ret := _m.Called(ctx)
 
 	var r0 error
@@ -11,6 +11,7 @@ import (
 	"github.com/actions/actions-runner-controller/logging"
 	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"

@@ -96,6 +97,9 @@ func (w *Worker) applyDefaults() error {
 // about the ephemeral runner that should not be deleted when scaling down.
 // It returns an error if there is any issue with updating the job information.
 func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
+	defer span.End()
+
 	w.logger.Info("Updating job info for the runner",
 		"runnerName", jobInfo.RunnerName,
 		"ownerName", jobInfo.OwnerName,

@@ -164,6 +168,9 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
 // Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
 // If any error occurs during the process, it returns an error with a descriptive message.
 func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
+	defer span.End()
+
 	patchID := w.setDesiredWorkerState(count, jobsCompleted)
 
 	original, err := json.Marshal(
@@ -8,6 +8,7 @@ import (
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
 	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"

@@ -38,6 +39,9 @@ func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, er
 }
 
 func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.ScaleEphemeralRunnerSet")
+	defer span.End()
+
 	original := &v1alpha1.EphemeralRunnerSet{
 		Spec: v1alpha1.EphemeralRunnerSetSpec{
 			Replicas: -1,

@@ -83,6 +87,9 @@ func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Contex
 }
 
 func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
+	defer span.End()
+
 	original := &v1alpha1.EphemeralRunner{}
 	originalJson, err := json.Marshal(original)
 	if err != nil {
@@ -13,6 +13,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
 )
 
 const (

@@ -38,6 +39,9 @@ func NewAutoScalerClient(
 	runnerScaleSetId int,
 	options ...func(*AutoScalerClient),
 ) (*AutoScalerClient, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "NewAutoScalerClient")
+	defer span.End()
+
 	listener := AutoScalerClient{
 		logger: logger.WithName("auto_scaler"),
 	}

@@ -59,6 +63,9 @@ func NewAutoScalerClient(
 }
 
 func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "createSession")
+	defer span.End()
+
 	hostName, err := os.Hostname()
 	if err != nil {
 		hostName = uuid.New().String()

@@ -130,6 +137,9 @@ func (m *AutoScalerClient) Close() error {
 }
 
 func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.GetRunnerScaleSetMessage")
+	defer span.End()
+
 	if m.initialMessage != nil {
 		err := handler(m.initialMessage)
 		if err != nil {

@@ -162,6 +172,9 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
 }
 
 func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.deleteMessage")
+	defer span.End()
+
 	err := m.client.DeleteMessage(ctx, messageId)
 	if err != nil {
 		return fmt.Errorf("delete message failed from refreshing client. %w", err)

@@ -172,6 +185,9 @@ func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) e
 }
 
 func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.AcquireJobsForRunnerScaleSet")
+	defer span.End()
+
 	m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
 	if len(requestIds) == 0 {
 		return nil
@@ -10,6 +10,7 @@ import (
 	"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
 )
 
 type ScaleSettings struct {

@@ -60,6 +61,9 @@ func NewService(
 	settings *ScaleSettings,
 	options ...func(*Service),
 ) (*Service, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "NewService")
+	defer span.End()
+
 	s := &Service{
 		ctx: ctx,
 		rsClient: rsClient,
@@ -34,6 +34,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"go.opentelemetry.io/otel"
 	"golang.org/x/net/http/httpproxy"
 	"golang.org/x/sync/errgroup"
 )

@@ -155,6 +156,9 @@ type runOptions struct {
 }
 
 func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "run")
+	defer span.End()
+
 	// Create root context and hook with sigint and sigterm
 	creds := &actions.ActionsAuth{}
 	if rc.Token != "" {
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	mock "github.com/stretchr/testify/mock"
+	"go.opentelemetry.io/otel"
 )
 
 // MockKubernetesManager is an autogenerated mock type for the KubernetesManager type

@@ -15,6 +16,9 @@ type MockKubernetesManager struct {
 // ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
 func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.ScaleEphemeralRunnerSet")
+	defer span.End()
+
 	ret := _m.Called(ctx, namespace, resourceName, runnerCount)
 
 	var r0 error

@@ -29,6 +33,9 @@ func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, na
 // UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
 func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
+	defer span.End()
+
 	ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
 
 	var r0 error
@@ -6,6 +6,7 @@ import (
 	context "context"
 
 	actions "github.com/actions/actions-runner-controller/github/actions"
+	"go.opentelemetry.io/otel"
 
 	mock "github.com/stretchr/testify/mock"
 )

@@ -17,6 +18,9 @@ type MockRunnerScaleSetClient struct {
 // AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
 func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.AcquireJobsForRunnerScaleSet")
+	defer span.End()
+
 	ret := _m.Called(ctx, requestIds)
 
 	var r0 error

@@ -31,6 +35,9 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
 // GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
 func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.GetRunnerScaleSetMessage")
+	defer span.End()
+
 	ret := _m.Called(ctx, handler, maxCapacity)
 
 	var r0 error
@@ -8,6 +8,7 @@ import (
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel"
 )
 
 type SessionRefreshingClient struct {

@@ -25,6 +26,9 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
 }
 
 func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.GetMessage")
+	defer span.End()
+
 	if maxCapacity < 0 {
 		return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
 	}

@@ -55,6 +59,9 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
 }
 
 func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.DeleteMessage")
+	defer span.End()
+
 	err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
 	if err == nil {
 		return nil

@@ -82,6 +89,9 @@ func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId i
 }
 
 func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.AcquireJobs")
+	defer span.End()
+
 	ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
 	if err == nil {
 		return ids, nil
@@ -21,6 +21,8 @@ import (
 	"fmt"
 
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	otelCodes "go.opentelemetry.io/otel/codes"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"

@@ -70,6 +72,9 @@ type AutoscalingListenerReconciler struct {
 // Reconcile a AutoscalingListener resource to meet its desired spec.
 func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)
 
 	autoscalingListener := new(v1alpha1.AutoscalingListener)

@@ -266,6 +271,15 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 }
 
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.cleanupResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)
 	err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
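Cleanup-style helpers such as cleanupResources above use a named error return so a second deferred closure can mark the span as failed before it ends. A sketch of that variant, not part of the diff (cleanup is a hypothetical name; otelCodes aliases go.opentelemetry.io/otel/codes as in the import hunk above):

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	otelCodes "go.opentelemetry.io/otel/codes"
)

// cleanup: deferred functions run last-in-first-out, so the error check
// below executes before span.End(), recording any failure on the span.
func cleanup(ctx context.Context) (done bool, err error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "cleanup")
	defer span.End()
	defer func() {
		if err != nil {
			span.SetStatus(otelCodes.Error, "error")
			span.RecordError(err)
		}
	}()

	_ = ctx // a real helper would pass ctx to client/API calls here
	return false, errors.New("example failure recorded on the span")
}

func main() { _, _ = cleanup(context.Background()) }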
@@ -373,6 +387,9 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 }
 
 func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createServiceAccountForListener")
+	defer span.End()
+
 	newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {

@@ -390,6 +407,9 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
 }
 
 func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createListenerPod")
+	defer span.End()
+
 	var envs []corev1.EnvVar
 	if autoscalingListener.Spec.Proxy != nil {
 		httpURL := corev1.EnvVar{

@@ -499,6 +519,9 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 }
 
 func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.certificate")
+	defer span.End()
+
 	if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
 		return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
 	}

@@ -537,6 +560,9 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
 }
 
 func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createSecretsForListener")
+	defer span.End()
+
 	newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {

@@ -554,6 +580,9 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
 }
 
 func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createProxySecret")
+	defer span.End()
+
 	data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
 		var secret corev1.Secret
 		err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret)

@@ -593,6 +622,9 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
 }
 
 func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateSecretsForListener")
+	defer span.End()
+
 	dataHash := hash.ComputeTemplateHash(secret.Data)
 	updatedMirrorSecret := mirrorSecret.DeepCopy()
 	updatedMirrorSecret.Labels["secret-data-hash"] = dataHash

@@ -609,6 +641,9 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
 }
 
 func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleForListener")
+	defer span.End()
+
 	newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
 
 	logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)

@@ -622,6 +657,9 @@ func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Contex
 }
 
 func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateRoleForListener")
+	defer span.End()
+
 	updatedPatchRole := listenerRole.DeepCopy()
 	updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
 	updatedPatchRole.Rules = desiredRules

@@ -637,6 +675,9 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
 }
 
 func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleBindingForListener")
+	defer span.End()
+
 	newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
 
 	logger.Info("Creating listener role binding",
@@ -27,6 +27,8 @@ import (
 	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
+	"go.opentelemetry.io/otel"
+	otelCodes "go.opentelemetry.io/otel/codes"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"

@@ -93,6 +95,9 @@ type AutoscalingRunnerSetReconciler struct {
 // Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
 func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.Reconcile")
+	defer span.End()
+
 	log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
 
 	autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)

@@ -334,6 +339,15 @@ func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1a
 }
 
 func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupListener")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up the listener")
 	var listener v1alpha1.AutoscalingListener
 	err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)

@@ -355,6 +369,15 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
 }
 
 func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupEphemeralRunnerSets")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	logger.Info("Cleaning up ephemeral runner sets")
 	runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
 	if err != nil {

@@ -373,6 +396,9 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
 }
 
 func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteEphemeralRunnerSets")
+	defer span.End()
+
 	for i := range oldRunnerSets {
 		rs := &oldRunnerSets[i]
 		// already deleted but contains finalizer so it still exists

@@ -390,6 +416,15 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 }
 
 func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.removeFinalizersFromDependentResources")
+	defer span.End()
+	defer func() {
+		if err != nil {
+			span.SetStatus(otelCodes.Error, "error")
+			span.RecordError(err)
+		}
+	}()
+
 	c := autoscalingRunnerSetFinalizerDependencyCleaner{
 		client: r.Client,
 		autoscalingRunnerSet: autoscalingRunnerSet,

@@ -413,6 +448,9 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
 }
 
 func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createRunnerScaleSet")
+	defer span.End()
+
 	logger.Info("Creating a new runner scale set")
 	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
 	if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {

@@ -504,6 +542,9 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetRunnerGroup")
+	defer span.End()
+
 	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")

@@ -547,6 +588,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
 }
 
 func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetName")
+	defer span.End()
+
 	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		logger.Error(err, "Failed to parse runner scale set ID")

@@ -583,6 +627,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 }
 
 func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteRunnerScaleSet")
+	defer span.End()
+
 	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
 	if !ok {
 		// Annotation not being present can occur in 3 scenarios

@@ -634,6 +681,9 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createEphemeralRunnerSet")
+	defer span.End()
+
 	desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
 	if err != nil {
 		log.Error(err, "Could not create EphemeralRunnerSet")

@@ -656,6 +706,9 @@ func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Co
 }
 
 func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createAutoScalingListenerForRunnerSet")
+	defer span.End()
+
 	var imagePullSecrets []corev1.LocalObjectReference
 	for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
 		imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{

@@ -680,6 +733,9 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 }
 
 func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.listEphemeralRunnerSets")
+	defer span.End()
+
 	list := new(v1alpha1.EphemeralRunnerSetList)
 	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
 		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)

@@ -689,6 +745,9 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
 }
 
 func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientFor")
+	defer span.End()
+
 	var configSecret corev1.Secret
 	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
 		return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)

@@ -709,6 +768,9 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a
 }
 
 func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientOptionsFor")
+	defer span.End()
+
 	var options []actions.ClientOption
 
 	if autoscalingRunnerSet.Spec.Proxy != nil {

@@ -794,6 +856,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool,
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleBindingFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -838,6 +903,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -881,6 +949,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeServiceAccountFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -925,6 +996,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeNoPermissionServiceAccountFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -969,6 +1043,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeGitHubSecretFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -1013,6 +1090,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleBindingFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}

@@ -1057,6 +1137,9 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
 }
 
 func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
+	ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleFinalizer")
+	defer span.End()
+
 	if c.requeue || c.err != nil {
 		return
 	}
@@ -3,6 +3,7 @@ package actionsgithubcom
 
 import (
 	"context"
 
+	"go.opentelemetry.io/otel"
 	kclient "sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -16,6 +17,9 @@ type patcher interface {
 }
 
 func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "patch")
+	defer span.End()
+
 	original := obj.DeepCopy()
 	update(obj)
 	return client.Patch(ctx, obj, kclient.MergeFrom(original))

@@ -26,6 +30,9 @@ type subResourcePatcher interface {
 }
 
 func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
+	ctx, span := otel.Tracer("arc").Start(ctx, "patchSubResource")
+	defer span.End()
+
 	original := obj.DeepCopy()
 	update(obj)
 	return client.Patch(ctx, obj, kclient.MergeFrom(original))
@ -26,6 +26,8 @@ import (
|
|||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/go-logr/logr"
|
||||
"go.opentelemetry.io/otel"
|
||||
otelCodes "go.opentelemetry.io/otel/codes"
|
||||
"go.uber.org/multierr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
|
@ -68,6 +70,9 @@ type EphemeralRunnerReconciler struct {
|
|||
// For more details, check Reconcile and its Result here:
|
||||
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
|
||||
func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.Reconcile")
|
||||
defer span.End()
|
||||
|
||||
log := r.Log.WithValues("ephemeralrunner", req.NamespacedName)
|
||||
|
||||
ephemeralRunner := new(v1alpha1.EphemeralRunner)
|
||||
|
|
@ -294,6 +299,9 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerFromService")
|
||||
defer span.End()
|
||||
|
||||
if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
|
|
@ -323,6 +331,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupResources")
|
||||
defer span.End()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(otelCodes.Error, "error")
|
||||
span.RecordError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Cleaning up the runner pod")
|
||||
pod := new(corev1.Pod)
|
||||
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
|
||||
|
|
@ -361,6 +378,15 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupContainerHooksResources")
|
||||
defer span.End()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(otelCodes.Error, "error")
|
||||
span.RecordError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Cleaning up runner linked pods")
|
||||
done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
|
||||
if err != nil {
|
||||
|
|
@ -381,6 +407,15 @@ func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.C
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedPods")
|
||||
defer span.End()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(otelCodes.Error, "error")
|
||||
span.RecordError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
runnerLinedLabels := client.MatchingLabels(
|
||||
map[string]string{
|
||||
"runner-pod": ephemeralRunner.Name,
|
||||
|
|
@ -416,6 +451,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedSecrets")
|
||||
defer span.End()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(otelCodes.Error, "error")
|
||||
span.RecordError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
runnerLinkedLabels := client.MatchingLabels(
|
||||
map[string]string{
|
||||
"runner-pod": ephemeralRunner.ObjectMeta.Name,
|
||||
|
|
@ -451,6 +495,9 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFailed")
|
||||
defer span.End()
|
||||
|
||||
log.Info("Updating ephemeral runner status to Failed")
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.Phase = corev1.PodFailed
|
||||
|
|
@ -470,6 +517,9 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
|
|||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFinished")
|
||||
defer span.End()
|
||||
|
||||
log.Info("Updating ephemeral runner status to Finished")
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.Phase = corev1.PodSucceeded
|
||||
|
|
@ -484,6 +534,9 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
|
|||
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
|
||||
// It should not be responsible for setting the status to Failed.
|
||||
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
|
||||
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deletePodAsFailed")
|
||||
defer span.End()
|
||||
|
||||
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
|
||||
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
|
||||
|
|
@@ -511,6 +564,9 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateStatusWithRunnerConfig")
	defer span.End()

	// Runner is not registered with the service. We need to register it first
	log.Info("Creating ephemeral runner JIT config")
	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)

@@ -584,6 +640,9 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
}

func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createPod")
	defer span.End()

	var envs []corev1.EnvVar
	if runner.Spec.ProxySecretRef != "" {
		http := corev1.EnvVar{

@@ -657,6 +716,9 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}

func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createSecret")
	defer span.End()

	log.Info("Creating new secret for ephemeral runner")
	jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)

@@ -679,6 +741,9 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
// The event should not be re-queued since the termination status should be set
// before proceeding with reconciliation logic
func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateRunStatusFromPod")
	defer span.End()

	if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
		return nil
	}

@@ -702,6 +767,9 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}

func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientFor")
	defer span.End()

	secret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
		return nil, fmt.Errorf("failed to get secret: %w", err)

@@ -722,6 +790,9 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner
}

func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientOptionsFor")
	defer span.End()

	var opts []actions.ClientOption
	if runner.Spec.Proxy != nil {
		proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {

@@ -771,6 +842,15 @@ func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context,
// runnerRegisteredWithService checks if the runner is still registered with the service
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.runnerRegisteredWithService")
	defer span.End()
	defer func() {
		if err != nil {
			span.SetStatus(otelCodes.Error, "error")
			span.RecordError(err)
		}
	}()

	actionsClient, err := r.actionsClientFor(ctx, runner)
	if err != nil {
		return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
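runnerRegisteredWithService is the only method here that also marks its span failed. Because err is a named return, the deferred closure sees the final error value and can set the span status and record the error before the earlier-deferred span.End() runs (defers run last-in, first-out). A standalone sketch of the idiom, with errors.New standing in for the real Actions API call:

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel"
	otelCodes "go.opentelemetry.io/otel/codes"
)

// check mirrors the diff's idiom: err is a named return inspected by a
// deferred closure, so every failure path marks the span as an error and
// attaches the error as a span event before span.End() fires.
func check(ctx context.Context) (err error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "Example.check")
	defer span.End()
	defer func() {
		if err != nil {
			span.SetStatus(otelCodes.Error, "error")
			span.RecordError(err)
		}
	}()

	_ = ctx // would carry the span into the downstream client call
	return errors.New("runner not found") // placeholder failure
}

func main() {
	fmt.Println(check(context.Background()))
}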
@@ -798,6 +878,9 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
}

func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deleteRunnerFromService")
	defer span.End()

	client, err := r.actionsClientFor(ctx, ephemeralRunner)
	if err != nil {
		return fmt.Errorf("failed to get actions client for runner: %v", err)
@@ -28,6 +28,7 @@ import (
	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	"go.opentelemetry.io/otel"
	"go.uber.org/multierr"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"

@@ -75,6 +76,9 @@ type EphemeralRunnerSetReconciler struct {
// be to bring the count of EphemeralRunners to the desired one, not to patch this resource
// until it is safe to do so
func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.Reconcile")
	defer span.End()

	log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName)

	ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet)

@@ -250,6 +254,9 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}

func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanupFinishedEphemeralRunners")
	defer span.End()

	// cleanup finished runners and proceed
	var errs []error
	for i := range finishedEphemeralRunners {

@@ -265,6 +272,9 @@ func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx conte
}

func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpProxySecret")
	defer span.End()

	if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
		return nil
	}

@@ -284,6 +294,9 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
}

func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpEphemeralRunners")
	defer span.End()

	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
	err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
	if err != nil {

@@ -357,6 +370,9 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte

// createEphemeralRunners provisions `count` number of v1alpha1.EphemeralRunner resources in the cluster.
func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createEphemeralRunners")
	defer span.End()

	// Track multiple errors at once and return the bundle.
	errs := make([]error, 0)
	for i := 0; i < count; i++ {
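createEphemeralRunners keeps going after individual failures, collecting every error and returning one bundle (the repository imports go.uber.org/multierr for this; the sketch below uses the standard library's errors.Join instead, purely for illustration):

package main

import (
	"errors"
	"fmt"
)

// createMany accumulates every per-item failure instead of bailing on the
// first one, then returns a single bundled error (nil when errs is empty).
func createMany(count int) error {
	errs := make([]error, 0)
	for i := 0; i < count; i++ {
		if i%2 == 1 { // placeholder failure condition
			errs = append(errs, fmt.Errorf("runner %d failed", i))
		}
	}
	return errors.Join(errs...)
}

func main() {
	fmt.Println(createMany(4))
}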
@@ -386,6 +402,9 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
}

func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createProxySecret")
	defer span.End()

	proxySecretData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
		secret := new(corev1.Secret)
		err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunnerSet.Namespace, Name: s}, secret)

@@ -431,6 +450,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteIdleEphemeralRunners")
	defer span.End()

	if count <= 0 {
		return nil
	}

@@ -477,6 +499,9 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
}

func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteEphemeralRunnerWithActionsClient")
	defer span.End()

	if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
		actionsError := &actions.ActionsError{}
		if !errors.As(err, &actionsError) {

@@ -503,6 +528,9 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
}

func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientFor")
	defer span.End()

	secret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
		return nil, fmt.Errorf("failed to get secret: %w", err)

@@ -523,6 +551,9 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs
}

func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientOptionsFor")
	defer span.End()

	var opts []actions.ClientOption
	if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
		proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
@@ -15,6 +15,7 @@ import (
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/actions/actions-runner-controller/hash"
	"github.com/actions/actions-runner-controller/logging"
	"go.opentelemetry.io/otel"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -573,6 +574,9 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}

func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
	ctx, span := otel.Tracer("arc").Start(ctx, "resourceBuilder.newEphemeralRunnerPod")
	defer span.End()

	var newPod corev1.Pod

	labels := map[string]string{}
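Since createPod hands its span-carrying ctx to the resource builder, newEphemeralRunnerPod's span becomes a child of createPod's, and pod construction shows up nested in the trace. A minimal sketch of that parent/child nesting, reusing the span names from the diff (no provider is installed here, so the global no-op tracer applies):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

// newEphemeralRunnerPod opens a child span: it joins whatever trace is
// already stored in ctx.
func newEphemeralRunnerPod(ctx context.Context) {
	_, span := otel.Tracer("arc").Start(ctx, "resourceBuilder.newEphemeralRunnerPod")
	defer span.End()
}

// createPod opens the parent span and hands the span-carrying ctx to the
// builder, so the builder's span nests underneath it.
func createPod(ctx context.Context) {
	ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createPod")
	defer span.End()
	newEphemeralRunnerPod(ctx)
}

func main() {
	createPod(context.Background())
}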
go.mod
@@ -4,7 +4,7 @@ go 1.22.1

require (
	github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
	github.com/davecgh/go-spew v1.1.1
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
	github.com/evanphx/json-patch v5.9.0+incompatible
	github.com/go-logr/logr v1.4.1
	github.com/golang-jwt/jwt/v4 v4.5.0

@@ -39,7 +39,7 @@ require (

require (
	github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
	github.com/aws/aws-sdk-go v1.44.122 // indirect
	github.com/aws/aws-sdk-go v1.44.327 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/boombuler/barcode v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect

@@ -50,11 +50,12 @@ require (
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-logr/zapr v1.3.0 // indirect
	github.com/go-openapi/jsonpointer v0.20.0 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/go-sql-driver/mysql v1.4.1 // indirect
	github.com/go-sql-driver/mysql v1.6.0 // indirect
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect

@@ -65,9 +66,9 @@ require (
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a // indirect
	github.com/gruntwork-io/go-commons v0.8.0 // indirect
	github.com/hashicorp/errwrap v1.0.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-multierror v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/imdario/mergo v0.3.16 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect

@@ -80,7 +81,7 @@ require (
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/pquerna/otp v1.2.0 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/common v0.45.0 // indirect

@@ -89,6 +90,9 @@ require (
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/urfave/cli v1.22.2 // indirect
	go.opentelemetry.io/otel v1.27.0 // indirect
	go.opentelemetry.io/otel/metric v1.27.0 // indirect
	go.opentelemetry.io/otel/trace v1.27.0 // indirect
	golang.org/x/crypto v0.22.0 // indirect
	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
	golang.org/x/sys v0.19.0 // indirect

@@ -98,6 +102,7 @@ require (
	golang.org/x/tools v0.17.0 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	gopkg.in/DataDog/dd-trace-go.v1 v1.64.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.28.3 // indirect
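Note that go.mod pins only the OpenTelemetry API modules (otel, metric, trace) at v1.27.0; with no SDK wired in, otel.Tracer("arc") resolves to the global no-op provider and every span above is silently dropped. A hypothetical wiring sketch, assuming go.opentelemetry.io/otel/sdk were also added as a dependency:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Install a concrete TracerProvider; until something like this runs,
	// otel.Tracer("arc") hands back a no-op tracer. A real setup would
	// also register an exporter on the provider.
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)

	_, span := otel.Tracer("arc").Start(context.Background(), "boot")
	defer span.End()
}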
go.sum
@@ -7,6 +7,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=

@@ -28,6 +29,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=

@@ -44,9 +47,12 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=

@@ -59,6 +65,7 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=

@@ -111,12 +118,14 @@ github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVf
github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=

@@ -179,6 +188,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=
github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=

@@ -221,6 +232,12 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=

@@ -250,6 +267,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=

@@ -282,6 +300,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -290,6 +309,7 @@ golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=

@@ -333,6 +353,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/DataDog/dd-trace-go.v1 v1.64.0 h1:zXQo6iv+dKRrDBxMXjRXLSKN2lY9uM34XFI4nPyp0eA=
gopkg.in/DataDog/dd-trace-go.v1 v1.64.0/go.mod h1:qzwVu8Qr8CqzQNw2oKEXRdD+fMnjYatjYMGE0tdCVG4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=