upgrade(golangci-lint): v2.1.2 (#4023)
Signed-off-by: karamaru-alpha <mrnk3078@gmail.com>
parent a33d34a036
commit f832b0b254
@@ -48,11 +48,11 @@ jobs:
           go-version-file: "go.mod"
           cache: false
       - name: golangci-lint
-        # https://github.com/golangci/golangci-lint-action/releases/tag/v6.5.2
-        uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84
+        # https://github.com/golangci/golangci-lint-action/releases/tag/v7.0.0
+        uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd
         with:
           only-new-issues: true
-          version: v1.64.8
+          version: v2.1.2
 
   generate:
     runs-on: ubuntu-latest
@@ -1,19 +1,14 @@
+version: "2"
 run:
   timeout: 5m
-output:
-  formats:
-    - format: colored-line-number
-      path: stdout
-linters-settings:
-  errcheck:
-    exclude-functions:
-      - (net/http.ResponseWriter).Write
-      - (*net/http.Server).Shutdown
-      - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
-      - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
-issues:
-  exclude-rules:
-    - path: controllers/suite_test.go
-      linters:
-        - staticcheck
-      text: "SA1019"
+linters:
+  settings:
+    errcheck:
+      exclude-functions:
+        - (net/http.ResponseWriter).Write
+        - (*net/http.Server).Shutdown
+        - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
+        - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
+  exclusions:
+    presets:
+      - std-error-handling
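golangci-lint v2 uses a new configuration schema: a top-level version: "2" key is required, the old linters-settings block moves under linters.settings, and the hand-written SA1019 exclude-rule for controllers/suite_test.go is superseded by the std-error-handling exclusion preset. If memory serves, v2 also ships a golangci-lint migrate command that rewrites a v1 config into this shape automatically; worth double-checking against the v2 release notes before relying on it.
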
Makefile (2 changed lines)

@@ -68,7 +68,7 @@ endif
 all: manager
 
 lint:
-	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.1.2 golangci-lint run
 
 GO_TEST_ARGS ?= -short
@@ -215,10 +215,10 @@ func (rs *RunnerSpec) validateRepository() error {
 		foundCount += 1
 	}
 	if foundCount == 0 {
-		return errors.New("Spec needs enterprise, organization or repository")
+		return errors.New("spec needs enterprise, organization or repository")
 	}
 	if foundCount > 1 {
-		return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
+		return errors.New("spec cannot have many fields defined enterprise, organization and repository")
 	}
 
 	return nil
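These message rewrites follow the Go convention that error strings start lowercase and carry no trailing punctuation; staticcheck's ST1005 is the check that typically flags this, and v2 enables it via the std-error-handling preset. A minimal sketch of the rationale, with hypothetical names:

// Sketch: error strings are usually wrapped into larger messages,
// where a mid-sentence capital or period reads oddly.
package main

import (
	"errors"
	"fmt"
)

var errMissingTarget = errors.New("spec needs enterprise, organization or repository")

func main() {
	// Wrapping shows why "Spec needs ..." would read badly here.
	err := fmt.Errorf("validating runner spec: %w", errMissingTarget)
	fmt.Println(err) // validating runner spec: spec needs enterprise, organization or repository
}
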
@@ -1178,7 +1178,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 		}
 	}
 	require.NotNil(t, volume)
-	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
+	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1238,7 +1238,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 		}
 	}
 	require.NotNil(t, volume)
-	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
+	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1298,7 +1298,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
 		}
 	}
 	require.NotNil(t, volume)
-	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
+	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
 	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
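volume.ConfigMap.Name works because corev1.ConfigMapVolumeSource embeds LocalObjectReference, and Go promotes the fields of an embedded struct onto the outer type; staticcheck's QF1008 quickfix ("omit embedded fields from selector expression") flags the redundant long selector. A self-contained sketch of the mechanism, using local stand-in types:

package main

import "fmt"

type LocalObjectReference struct{ Name string }

// Mirrors the corev1 shape: the reference is embedded, so Name is promoted.
type ConfigMapVolumeSource struct {
	LocalObjectReference
}

func main() {
	v := ConfigMapVolumeSource{LocalObjectReference{Name: "certs-configmap"}}
	// Both selectors read the same field; QF1008 prefers the short one.
	fmt.Println(v.LocalObjectReference.Name == v.Name) // true
}
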
@@ -287,7 +287,7 @@ func (e *exporter) ListenAndServe(ctx context.Context) error {
 }
 
 func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
-	m, ok := e.metrics.gauges[name]
+	m, ok := e.gauges[name]
 	if !ok {
 		return
 	}
@@ -299,7 +299,7 @@ func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
 }
 
 func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
-	m, ok := e.metrics.counters[name]
+	m, ok := e.counters[name]
 	if !ok {
 		return
 	}
@@ -311,7 +311,7 @@ func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
 }
 
 func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
-	m, ok := e.metrics.histograms[name]
+	m, ok := e.histograms[name]
 	if !ok {
 		return
 	}
@@ -339,7 +339,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
 	l := e.startedJobLabels(msg)
 	e.incCounter(MetricStartedJobsTotal, l)
 
-	startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
+	startupDuration := msg.RunnerAssignTime.Unix() - msg.ScaleSetAssignTime.Unix()
 	e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
 }
 
@@ -347,7 +347,7 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
 	l := e.completedJobLabels(msg)
 	e.incCounter(MetricCompletedJobsTotal, l)
 
-	executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
+	executionDuration := msg.FinishTime.Unix() - msg.RunnerAssignTime.Unix()
 	e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
 }
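Same promotion rule here: the exporter evidently embeds its metrics struct, and actions.JobStarted/JobCompleted evidently embed JobMessageBase, so e.gauges and msg.RunnerAssignTime reach the same fields as the longer selectors QF1008 flags.
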
@@ -77,7 +77,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !autoscalingListener.DeletionTimestamp.IsZero() {
 		if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
 			return ctrl.Result{}, nil
 		}
@@ -281,7 +281,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
 	switch {
 	case err == nil:
-		if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() {
+		if listenerPod.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener pod")
 			if err := r.Delete(ctx, listenerPod); err != nil {
 				return false, fmt.Errorf("failed to delete listener pod: %w", err)
@@ -299,7 +299,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
 	switch {
 	case err == nil:
-		if secret.ObjectMeta.DeletionTimestamp.IsZero() {
+		if secret.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener config secret")
 			if err := r.Delete(ctx, &secret); err != nil {
 				return false, fmt.Errorf("failed to delete listener config secret: %w", err)
@@ -316,7 +316,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret)
 	switch {
 	case err == nil:
-		if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() {
+		if proxySecret.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener proxy secret")
 			if err := r.Delete(ctx, proxySecret); err != nil {
 				return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
@@ -333,7 +333,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
 	switch {
 	case err == nil:
-		if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
+		if listenerRoleBinding.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener role binding")
 			if err := r.Delete(ctx, listenerRoleBinding); err != nil {
 				return false, fmt.Errorf("failed to delete listener role binding: %w", err)
@@ -349,7 +349,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
 	switch {
 	case err == nil:
-		if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
+		if listenerRole.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener role")
 			if err := r.Delete(ctx, listenerRole); err != nil {
 				return false, fmt.Errorf("failed to delete listener role: %w", err)
@@ -366,7 +366,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 	err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
 	switch {
 	case err == nil:
-		if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() {
+		if listenerSa.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener service account")
 			if err := r.Delete(ctx, listenerSa); err != nil {
 				return false, fmt.Errorf("failed to delete listener service account: %w", err)
@@ -382,7 +382,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 }
 
 func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
-	newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
+	newServiceAccount := r.newScaleSetListenerServiceAccount(autoscalingListener)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
 		return ctrl.Result{}, err
@@ -467,7 +467,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 
 	logger.Info("Creating listener config secret")
 
-	podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
+	podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
 	if err != nil {
 		logger.Error(err, "Failed to build listener config secret")
 		return ctrl.Result{}, err
@@ -486,7 +486,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 		return ctrl.Result{Requeue: true}, nil
 	}
 
-	newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
+	newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
 	if err != nil {
 		logger.Error(err, "Failed to build listener pod")
 		return ctrl.Result{}, err
@@ -546,7 +546,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
 }
 
 func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
-	newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
+	newListenerSecret := r.newScaleSetListenerSecretMirror(autoscalingListener, secret)
 
 	if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
 		return ctrl.Result{}, err
@@ -618,7 +618,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
 }
 
 func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
-	newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
+	newRole := r.newScaleSetListenerRole(autoscalingListener)
 
 	logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
 	if err := r.Create(ctx, newRole); err != nil {
@@ -646,7 +646,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
 }
 
 func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
-	newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
+	newRoleBinding := r.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
 
 	logger.Info("Creating listener role binding",
 		"namespace", newRoleBinding.Namespace,
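The r.ResourceBuilder.newX → r.newX rewrites rely on the same method promotion: the reconciler embeds ResourceBuilder, so the builder's methods are callable directly on the reconciler. A sketch with assumed (not the real) method shapes:

package main

import "fmt"

type ResourceBuilder struct{}

// Illustrative stand-in for builders like newScaleSetListenerServiceAccount.
func (b *ResourceBuilder) newListenerName(base string) string {
	return base + "-listener"
}

type AutoscalingListenerReconciler struct {
	ResourceBuilder // embedded: its methods are promoted onto r
}

func main() {
	r := &AutoscalingListenerReconciler{}
	// Identical calls; QF1008 flags the first form as redundant.
	fmt.Println(r.ResourceBuilder.newListenerName("arc"))
	fmt.Println(r.newListenerName("arc"))
}
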
@@ -99,7 +99,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !autoscalingRunnerSet.DeletionTimestamp.IsZero() {
 		if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
 			return ctrl.Result{}, nil
 		}
@@ -332,7 +332,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
 	err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
 	switch {
 	case err == nil:
-		if listener.ObjectMeta.DeletionTimestamp.IsZero() {
+		if listener.DeletionTimestamp.IsZero() {
 			logger.Info("Deleting the listener")
 			if err := r.Delete(ctx, &listener); err != nil {
 				return false, fmt.Errorf("failed to delete listener: %w", err)
@@ -369,7 +369,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 	for i := range oldRunnerSets {
 		rs := &oldRunnerSets[i]
 		// already deleted but contains finalizer so it still exists
-		if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !rs.DeletionTimestamp.IsZero() {
 			logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
 			continue
 		}
@@ -622,7 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 }
 
 func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
-	desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
+	desiredRunnerSet, err := r.newEphemeralRunnerSet(autoscalingRunnerSet)
 	if err != nil {
 		log.Error(err, "Could not create EphemeralRunnerSet")
 		return ctrl.Result{}, err
@@ -651,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 		})
 	}
 
-	autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
+	autoscalingListener, err := r.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
 	if err != nil {
 		log.Error(err, "Could not create AutoscalingListener spec")
 		return ctrl.Result{}, err
@@ -280,10 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			// This should trigger re-creation of EphemeralRunnerSet and Listener
 			patched := autoscalingRunnerSet.DeepCopy()
 			patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
-			if patched.ObjectMeta.Annotations == nil {
-				patched.ObjectMeta.Annotations = make(map[string]string)
+			if patched.Annotations == nil {
+				patched.Annotations = make(map[string]string)
 			}
-			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
+			patched.Annotations[annotationKeyValuesHash] = "test-hash"
 			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
 			autoscalingRunnerSet = patched.DeepCopy()
@@ -383,7 +383,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
 
 			patched = autoscalingRunnerSet.DeepCopy()
-			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
+			patched.Annotations[annotationKeyValuesHash] = "hash-changes"
 			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
 			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
 
@@ -546,10 +546,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
 			// Patch the AutoScalingRunnerSet image which should trigger
 			// the recreation of the Listener and EphemeralRunnerSet
 			patched := autoscalingRunnerSet.DeepCopy()
-			if patched.ObjectMeta.Annotations == nil {
-				patched.ObjectMeta.Annotations = make(map[string]string)
+			if patched.Annotations == nil {
+				patched.Annotations = make(map[string]string)
 			}
-			patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
+			patched.Annotations[annotationKeyValuesHash] = "testgroup2"
 			patched.Spec.Template.Spec = corev1.PodSpec{
 				Containers: []corev1.Container{
 					{
@@ -875,7 +875,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
 				autoscalingRunnerSetTestInterval,
 			).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
 
-			ars.ObjectMeta.Annotations = make(map[string]string)
+			ars.Annotations = make(map[string]string)
 			err = k8sClient.Update(ctx, ars)
 			Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful")
@@ -70,7 +70,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !ephemeralRunner.DeletionTimestamp.IsZero() {
 		if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
 			return ctrl.Result{}, nil
 		}
@@ -319,7 +319,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 	err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
 	switch {
 	case err == nil:
-		if pod.ObjectMeta.DeletionTimestamp.IsZero() {
+		if pod.DeletionTimestamp.IsZero() {
 			log.Info("Deleting the runner pod")
 			if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
 				return fmt.Errorf("failed to delete pod: %w", err)
@@ -339,7 +339,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
 	err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
 	switch {
 	case err == nil:
-		if secret.ObjectMeta.DeletionTimestamp.IsZero() {
+		if secret.DeletionTimestamp.IsZero() {
 			log.Info("Deleting the jitconfig secret")
 			if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
 				return fmt.Errorf("failed to delete secret: %w", err)
@@ -393,7 +393,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
 	var errs []error
 	for i := range runnerLinkedPodList.Items {
 		linkedPod := &runnerLinkedPodList.Items[i]
-		if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !linkedPod.DeletionTimestamp.IsZero() {
 			continue
 		}
 
@@ -409,7 +409,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
 func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
 	runnerLinkedLabels := client.MatchingLabels(
 		map[string]string{
-			"runner-pod": ephemeralRunner.ObjectMeta.Name,
+			"runner-pod": ephemeralRunner.Name,
 		},
 	)
 	var runnerLinkedSecretList corev1.SecretList
@@ -427,7 +427,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
 	var errs []error
 	for i := range runnerLinkedSecretList.Items {
 		s := &runnerLinkedSecretList.Items[i]
-		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !s.DeletionTimestamp.IsZero() {
 			continue
 		}
 
@@ -474,7 +474,7 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
 // deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
 // It should not be responsible for setting the status to Failed.
 func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
-	if pod.ObjectMeta.DeletionTimestamp.IsZero() {
+	if pod.DeletionTimestamp.IsZero() {
 		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
 		if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
 			return fmt.Errorf("failed to delete pod with status failed: %w", err)
@@ -640,7 +640,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
 	}
 
 	log.Info("Creating new pod for ephemeral runner")
-	newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
+	newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
 
 	if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
 		log.Error(err, "Failed to set controller reference to a new pod")
@@ -665,7 +665,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
 
 func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
 	log.Info("Creating new secret for ephemeral runner")
-	jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
+	jitSecret := r.newEphemeralRunnerJitSecret(runner)
 
 	if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
 		return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
@@ -83,7 +83,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
 	}
 
 	// Requested deletion does not need reconciled.
-	if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !ephemeralRunnerSet.DeletionTimestamp.IsZero() {
 		if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
 			return ctrl.Result{}, nil
 		}
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
 	// Track multiple errors at once and return the bundle.
 	errs := make([]error, 0)
 	for i := 0; i < count; i++ {
-		ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
+		ephemeralRunner := r.newEphemeralRunner(runnerSet)
 		if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
 			ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
 		}
@@ -641,7 +641,7 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
 		if err == nil && patchID > ephemeralRunnerState.latestPatchID {
 			ephemeralRunnerState.latestPatchID = patchID
 		}
-		if !r.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !r.DeletionTimestamp.IsZero() {
 			ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
 			continue
 		}
@@ -543,8 +543,8 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
 	newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
-			Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
+			GenerateName: autoscalingRunnerSet.Name + "-",
+			Namespace:    autoscalingRunnerSet.Namespace,
 			Labels:       labels,
 			Annotations:  newAnnotations,
 			OwnerReferences: []metav1.OwnerReference{
@@ -617,18 +617,18 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
 	labels := map[string]string{}
 	annotations := map[string]string{}
 
-	for k, v := range runner.ObjectMeta.Labels {
+	for k, v := range runner.Labels {
 		labels[k] = v
 	}
-	for k, v := range runner.Spec.PodTemplateSpec.Labels {
+	for k, v := range runner.Spec.Labels {
 		labels[k] = v
 	}
 	labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
 
-	for k, v := range runner.ObjectMeta.Annotations {
+	for k, v := range runner.Annotations {
 		annotations[k] = v
 	}
-	for k, v := range runner.Spec.PodTemplateSpec.Annotations {
+	for k, v := range runner.Spec.Annotations {
 		annotations[k] = v
 	}
 
@@ -640,8 +640,8 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
 	)
 
 	objectMeta := metav1.ObjectMeta{
-		Name:      runner.ObjectMeta.Name,
-		Namespace: runner.ObjectMeta.Namespace,
+		Name:            runner.Name,
+		Namespace:       runner.Namespace,
 		Labels:          labels,
 		Annotations:     annotations,
 		OwnerReferences: []metav1.OwnerReference{
@@ -657,10 +657,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
 	}
 
 	newPod.ObjectMeta = objectMeta
-	newPod.Spec = runner.Spec.PodTemplateSpec.Spec
-	newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers))
+	newPod.Spec = runner.Spec.Spec
+	newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.Spec.Containers))
 
-	for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers {
+	for _, c := range runner.Spec.Spec.Containers {
 		if c.Name == v1alpha1.EphemeralRunnerContainerName {
 			c.Env = append(
 				c.Env,
@@ -345,7 +345,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
 	}
 
 	var runnerPodList corev1.PodList
-	if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
+	if err := r.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
 		kindLabel: hra.Spec.ScaleTargetRef.Name,
 	})); err != nil {
 		return nil, err
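Likewise for r.Client.List → r.List: controller-runtime reconcilers conventionally embed client.Client, so the client's methods are promoted onto the reconciler itself. A reduced sketch (the real interface is much larger):

package main

import "fmt"

type Client struct{}

func (Client) List(out *[]string) { *out = []string{"runner-pod-a", "runner-pod-b"} }

type Reconciler struct {
	Client // embedded: List is promoted, so r.List == r.Client.List
}

func main() {
	var pods []string
	r := Reconciler{}
	r.List(&pods)
	fmt.Println(pods)
}
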
@@ -29,7 +29,7 @@ func newGithubClient(server *httptest.Server) *github.Client {
 	if err != nil {
 		panic(err)
 	}
-	client.Client.BaseURL = baseURL
+	client.BaseURL = baseURL
 
 	return client
 }
@@ -82,8 +82,8 @@ func (s *batchScaler) Add(st *ScaleTarget) {
 				break batch
 			case st := <-s.queue:
 				nsName := types.NamespacedName{
-					Namespace: st.HorizontalRunnerAutoscaler.Namespace,
-					Name:      st.HorizontalRunnerAutoscaler.Name,
+					Namespace: st.Namespace,
+					Name:      st.Name,
 				}
 				b, ok := batches[nsName]
 				if !ok {
@@ -208,7 +208,7 @@ func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperat
 			//
 			// In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer
 			// than the "intended" duration, which is the duration of the trigger when the reservation was created.
-			duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
+			duration := copy.Spec.CapacityReservations[i].ExpirationTime.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
 			copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now}
 			copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)}
 		}
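ExpirationTime.Sub works because metav1.Time embeds time.Time, promoting its methods (Sub, After, Unix, ...); only the argument still needs the explicit .Time hop, since Sub takes a plain time.Time. A sketch with a local stand-in for metav1.Time:

package main

import (
	"fmt"
	"time"
)

// Time mirrors metav1.Time: embedding promotes all time.Time methods.
type Time struct{ time.Time }

func main() {
	effective := Time{time.Now()}
	expiration := Time{effective.Add(10 * time.Minute)}
	// Promoted method; equivalent to expiration.Time.Sub(effective.Time).
	fmt.Println(expiration.Sub(effective.Time)) // 10m0s
}
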
@@ -503,13 +503,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getManagedRunnerGroup
 	switch kind {
 	case "RunnerSet":
 		var rs v1alpha1.RunnerSet
-		if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
+		if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
 			return groups, err
 		}
 		o, e, g = rs.Spec.Organization, rs.Spec.Enterprise, rs.Spec.Group
 	case "RunnerDeployment", "":
 		var rd v1alpha1.RunnerDeployment
-		if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
+		if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
 			return groups, err
 		}
 		o, e, g = rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Enterprise, rd.Spec.Template.Spec.Group
@@ -562,7 +562,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx
 
 HRA:
 	for _, hra := range hras {
-		if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !hra.DeletionTimestamp.IsZero() {
 			continue
 		}
 
@@ -603,7 +603,7 @@ HRA:
 		case "RunnerSet":
 			var rs v1alpha1.RunnerSet
 
-			if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
+			if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
 				return nil, err
 			}
 
@@ -634,7 +634,7 @@ HRA:
 		case "RunnerDeployment", "":
 			var rd v1alpha1.RunnerDeployment
 
-			if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
+			if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
 				return nil, err
 			}
 
@@ -676,7 +676,7 @@ func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscale
 	now := time.Now()
 
 	for _, reservation := range autoscaler.Spec.CapacityReservations {
-		if reservation.ExpirationTime.Time.After(now) {
+		if reservation.ExpirationTime.After(now) {
 			capacityReservations = append(capacityReservations, reservation)
 		}
 	}
@@ -713,7 +713,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
 	switch hra.Spec.ScaleTargetRef.Kind {
 	case "", "RunnerDeployment":
 		var rd v1alpha1.RunnerDeployment
-		if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
+		if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
 			autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
 			return nil
 		}
@@ -740,7 +740,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
 		return keys
 	case "RunnerSet":
 		var rs v1alpha1.RunnerSet
-		if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
+		if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
 			autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
 			return nil
 		}
@@ -71,7 +71,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !hra.DeletionTimestamp.IsZero() {
 		r.GitHubClient.DeinitForHRA(&hra)
 
 		return ctrl.Result{}, nil
@@ -91,7 +91,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !rd.DeletionTimestamp.IsZero() {
 		return ctrl.Result{}, nil
 	}
 
@@ -120,14 +120,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 			copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
 		}
 
-		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
+		if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
 			return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
 		}
 	} else if ephemeral && effectiveTime != nil {
 		copy := rd.DeepCopy()
 		copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
 
-		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
+		if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
 			return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
 		}
 	}
@@ -142,7 +142,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !rs.DeletionTimestamp.IsZero() {
 		return ctrl.Result{}, nil
 	}
 
@@ -160,7 +160,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 		org:      rs.Spec.Organization,
 		repo:     rs.Spec.Repository,
 		replicas: replicas,
-		labels:   rs.Spec.RunnerConfig.Labels,
+		labels:   rs.Spec.Labels,
 		getRunnerMap: func() (map[string]struct{}, error) {
 			// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
 			var runnerPodList corev1.PodList
@@ -224,14 +224,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
 			copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
 		}
 
-		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
+		if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
 			return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
 		}
 	} else if ephemeral && effectiveTime != nil {
 		copy := rs.DeepCopy()
 		copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
 
-		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
+		if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
 			return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
 		}
 	}
@@ -253,7 +253,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Con
 		org:      rd.Spec.Template.Spec.Organization,
 		repo:     rd.Spec.Template.Spec.Repository,
 		replicas: rd.Spec.Replicas,
-		labels:   rd.Spec.Template.Spec.RunnerConfig.Labels,
+		labels:   rd.Spec.Template.Spec.Labels,
 		getRunnerMap: func() (map[string]struct{}, error) {
 			// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
 			var runnerList v1alpha1.RunnerList
@@ -484,7 +484,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arc
 	var reserved int
 
 	for _, reservation := range hra.Spec.CapacityReservations {
-		if reservation.ExpirationTime.Time.After(now) {
+		if reservation.ExpirationTime.After(now) {
 			reserved += reservation.Replicas
 		}
 	}
@@ -20,12 +20,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"reflect"
 	"strconv"
 	"strings"
 	"time"
 
+	"k8s.io/apimachinery/pkg/api/resource"
+
 	"github.com/actions/actions-runner-controller/build"
 	"github.com/actions/actions-runner-controller/hash"
 	"github.com/go-logr/logr"
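This import shuffle appears to be formatting-only: gci-style grouping wants third-party imports (here k8s.io/apimachinery/pkg/api/resource) in their own block rather than sorted into the standard-library one.
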
@@ -107,12 +108,12 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if runner.ObjectMeta.DeletionTimestamp.IsZero() {
-		finalizers, added := addFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
+	if runner.DeletionTimestamp.IsZero() {
+		finalizers, added := addFinalizer(runner.Finalizers, finalizerName)
 
 		if added {
 			newRunner := runner.DeepCopy()
-			newRunner.ObjectMeta.Finalizers = finalizers
+			newRunner.Finalizers = finalizers
 
 			if err := r.Update(ctx, newRunner); err != nil {
 				log.Error(err, "Failed to update runner")
@@ -271,11 +272,11 @@ func ephemeralRunnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
 }
 
 func (r *RunnerReconciler) processRunnerDeletion(runner v1alpha1.Runner, ctx context.Context, log logr.Logger, pod *corev1.Pod) (reconcile.Result, error) {
-	finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
+	finalizers, removed := removeFinalizer(runner.Finalizers, finalizerName)
 
 	if removed {
 		newRunner := runner.DeepCopy()
-		newRunner.ObjectMeta.Finalizers = finalizers
+		newRunner.Finalizers = finalizers
 
 		if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
 			log.Error(err, "Unable to remove finalizer")
@@ -305,8 +306,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 	if needsServiceAccount {
 		serviceAccount := &corev1.ServiceAccount{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      runner.ObjectMeta.Name,
-				Namespace: runner.ObjectMeta.Namespace,
+				Name:      runner.Name,
+				Namespace: runner.Namespace,
 			},
 		}
 		if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
@@ -321,7 +322,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 				APIGroups:     []string{"actions.summerwind.dev"},
 				Resources:     []string{"runners/status"},
 				Verbs:         []string{"get", "update", "patch"},
-				ResourceNames: []string{runner.ObjectMeta.Name},
+				ResourceNames: []string{runner.Name},
 			},
 		}...)
 	}
@@ -359,8 +360,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 
 		role := &rbacv1.Role{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      runner.ObjectMeta.Name,
-				Namespace: runner.ObjectMeta.Namespace,
+				Name:      runner.Name,
+				Namespace: runner.Namespace,
 			},
 			Rules: rules,
 		}
@@ -370,19 +371,19 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
 
 		roleBinding := &rbacv1.RoleBinding{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      runner.ObjectMeta.Name,
-				Namespace: runner.ObjectMeta.Namespace,
+				Name:      runner.Name,
+				Namespace: runner.Namespace,
 			},
 			RoleRef: rbacv1.RoleRef{
 				APIGroup: "rbac.authorization.k8s.io",
 				Kind:     "Role",
-				Name:     runner.ObjectMeta.Name,
+				Name:     runner.Name,
 			},
 			Subjects: []rbacv1.Subject{
 				{
 					Kind:      "ServiceAccount",
-					Name:      runner.ObjectMeta.Name,
-					Namespace: runner.ObjectMeta.Namespace,
+					Name:      runner.Name,
+					Namespace: runner.Namespace,
 				},
 			},
 		}
@@ -482,7 +483,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 
 	labels := map[string]string{}
 
-	for k, v := range runner.ObjectMeta.Labels {
+	for k, v := range runner.Labels {
 		labels[k] = v
 	}
 
@@ -511,8 +512,8 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	//
 	// See https://github.com/actions/actions-runner-controller/issues/143 for more context.
 	labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
-		filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
-		runner.ObjectMeta.Annotations,
+		filterLabels(runner.Labels, LabelKeyRunnerTemplateHash),
+		runner.Annotations,
 		runner.Spec,
 		ghc.GithubBaseURL,
 		// Token change should trigger replacement.
@@ -523,10 +524,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	)
 
 	objectMeta := metav1.ObjectMeta{
-		Name:      runner.ObjectMeta.Name,
-		Namespace: runner.ObjectMeta.Namespace,
+		Name:        runner.Name,
+		Namespace:   runner.Namespace,
 		Labels:      labels,
-		Annotations: runner.ObjectMeta.Annotations,
+		Annotations: runner.Annotations,
 	}
 
 	template.ObjectMeta = objectMeta
@@ -649,7 +650,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	if runnerSpec.ServiceAccountName != "" {
 		pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
 	} else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
-		pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
+		pod.Spec.ServiceAccountName = runner.Name
 	}
 
 	if runnerSpec.AutomountServiceAccountToken != nil {
@@ -704,7 +705,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.RuntimeClassName = runnerSpec.RuntimeClassName
 	}
 
-	pod.ObjectMeta.Name = runner.ObjectMeta.Name
+	pod.Name = runner.Name
 
 	// Inject the registration token and the runner name
 	updated := mutatePod(&pod, runner.Status.Registration.Token)
@@ -720,7 +721,7 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
 	updated := pod.DeepCopy()
 
 	if getRunnerEnv(pod, EnvVarRunnerName) == "" {
-		setRunnerEnv(updated, EnvVarRunnerName, pod.ObjectMeta.Name)
+		setRunnerEnv(updated, EnvVarRunnerName, pod.Name)
 	}
 
 	if getRunnerEnv(pod, EnvVarRunnerToken) == "" {
@@ -770,11 +771,11 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
 
 func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
 	var (
-		privileged                bool = true
-		dockerdInRunner           bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
-		dockerEnabled             bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
-		ephemeral                 bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
-		dockerdInRunnerPrivileged bool = dockerdInRunner
+		privileged                = true
+		dockerdInRunner           = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
+		dockerEnabled             = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
+		ephemeral                 = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
+		dockerdInRunnerPrivileged = dockerdInRunner
 
 		defaultRunnerImage            = d.RunnerImage
 		defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
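Dropping the explicit bool here is the redundant-type fix (ST1023, if I recall the check ID correctly): the type is already inferred from the right-hand side, so "privileged bool = true" says nothing that "privileged = true" does not.
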
@@ -797,10 +798,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 	template = *template.DeepCopy()
 
 	// This label selector is used by default when rd.Spec.Selector is empty.
-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
+	template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunner, "")
+	template.Labels = CloneAndAddLabel(template.Labels, LabelKeyPodMutation, LabelValuePodMutation)
 	if runnerSpec.GitHubAPICredentialsFrom != nil {
-		template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
+		template.Annotations = CloneAndAddLabel(template.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
 	}
 
 	workDir := runnerSpec.WorkDir
@@ -887,10 +888,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 
 	for i := range template.Spec.Containers {
 		c := template.Spec.Containers[i]
-		if c.Name == containerName {
+		switch c.Name {
+		case containerName:
 			runnerContainerIndex = i
 			runnerContainer = &c
-		} else if c.Name == "docker" {
+		case "docker":
 			dockerdContainerIndex = i
 			dockerdContainer = &c
 		}
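Rewriting the if/else-if chain on c.Name into a switch matches staticcheck's QF1003 style fix ("could use tagged switch"); behavior is identical. A sketch of the shape, with illustrative names:

package main

import "fmt"

func role(name string) string {
	// Tagged switch: one subject (name), one case per compared value.
	switch name {
	case "runner":
		return "runner container"
	case "docker":
		return "dockerd sidecar"
	default:
		return "unclassified"
	}
}

func main() {
	fmt.Println(role("docker"))
}
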
@@ -1364,7 +1366,7 @@ func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate
 	}
 	for i := range pod.Spec.Volumes {
 		if pod.Spec.Volumes[i].Name == "work" {
-			return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
+			return fmt.Errorf("work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead")
 		}
 	}
 	pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
@@ -79,7 +79,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	}
 
 	if len(envvars) == 0 {
-		return ctrl.Result{}, errors.New("Could not determine env vars for runner Pod")
+		return ctrl.Result{}, errors.New("could not determine env vars for runner Pod")
 	}
 
 	var enterprise, org, repo string
@@ -103,8 +103,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, err
 	}
 
-	if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
-		finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
+	if runnerPod.DeletionTimestamp.IsZero() {
+		finalizers, added := addFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)
 
 		var cleanupFinalizersAdded bool
 		if isContainerMode {
@@ -113,7 +113,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 
 		if added || cleanupFinalizersAdded {
 			newRunner := runnerPod.DeepCopy()
-			newRunner.ObjectMeta.Finalizers = finalizers
+			newRunner.Finalizers = finalizers
 
 			if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
 				log.Error(err, "Failed to update runner")
@@ -142,7 +142,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		}
 	}
 
-	if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
+	if finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerLinkedResourcesFinalizerName); removed {
 		if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
 			log.Info("Runner-linked pods clean up that has failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
 			return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
@@ -152,7 +152,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
 		}
 		patchedPod := runnerPod.DeepCopy()
-		patchedPod.ObjectMeta.Finalizers = finalizers
+		patchedPod.Finalizers = finalizers
 
 		if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
 			log.Error(err, "Failed to update runner for finalizer linked resources removal")
@@ -163,7 +163,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		runnerPod = *patchedPod
 	}
 
-	finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
+	finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)
 
 	if removed {
 		// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner to be gracefully stopped before the deletion timestamp is set.
@@ -175,7 +175,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	}
 
 	patchedPod := updatedPod.DeepCopy()
-	patchedPod.ObjectMeta.Finalizers = finalizers
+	patchedPod.Finalizers = finalizers
 
 	// We commit the removal of the finalizer so that Kuberenetes notices it and delete the pod resource from the cluster.
 	if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
@@ -284,7 +284,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 	var runnerLinkedPodList corev1.PodList
 	if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
 		map[string]string{
-			"runner-pod": pod.ObjectMeta.Name,
+			"runner-pod": pod.Name,
 		},
 	)); err != nil {
 		return fmt.Errorf("failed to list runner-linked pods: %w", err)
@@ -295,7 +295,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 		errs []error
 	)
 	for _, p := range runnerLinkedPodList.Items {
-		if !p.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !p.DeletionTimestamp.IsZero() {
 			continue
 		}
 
@@ -307,7 +307,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 			if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
 				return
 			}
-			errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
+			errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.Name, err))
 		}()
 	}
@@ -330,7 +330,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 	var runnerLinkedSecretList corev1.SecretList
 	if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
 		map[string]string{
-			"runner-pod": pod.ObjectMeta.Name,
+			"runner-pod": pod.Name,
 		},
 	)); err != nil {
 		return fmt.Errorf("failed to list runner-linked secrets: %w", err)
@@ -341,7 +341,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 		errs []error
 	)
 	for _, s := range runnerLinkedSecretList.Items {
-		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !s.DeletionTimestamp.IsZero() {
 			continue
 		}
 
@@ -353,7 +353,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 			if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
 				return
 			}
-			errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
+			errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.Name, err))
 		}()
 	}
@@ -90,7 +90,7 @@ var _ owner = (*ownerStatefulSet)(nil)
 func (s *ownerStatefulSet) pods(ctx context.Context, c client.Client) ([]corev1.Pod, error) {
 	var podList corev1.PodList
 
-	if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.ObjectMeta.Labels)); err != nil {
+	if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.Labels)); err != nil {
 		s.Log.Error(err, "Failed to list pods managed by statefulset")
 		return nil, err
 	}
@ -73,7 +73,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
    if !rd.DeletionTimestamp.IsZero() {
        return ctrl.Result{}, nil
    }

@ -112,7 +112,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
    }

    if newestSet == nil {
        if err := r.Client.Create(ctx, desiredRS); err != nil {
        if err := r.Create(ctx, desiredRS); err != nil {
            log.Error(err, "Failed to create runnerreplicaset resource")

            return ctrl.Result{}, err

@ -138,7 +138,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
    }

    if newestTemplateHash != desiredTemplateHash {
        if err := r.Client.Create(ctx, desiredRS); err != nil {
        if err := r.Create(ctx, desiredRS); err != nil {
            log.Error(err, "Failed to create runnerreplicaset resource")

            return ctrl.Result{}, err

@ -159,7 +159,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
        // but we still need to update the existing replicaset with it.
        // Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
        // See https://github.com/actions/actions-runner-controller/pull/355#discussion_r585379259
        if err := r.Client.Update(ctx, updateSet); err != nil {
        if err := r.Update(ctx, updateSet); err != nil {
            log.Error(err, "Failed to update runnerreplicaset resource")

            return ctrl.Result{}, err

@ -195,7 +195,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
        newestSet.Spec.Replicas = &newDesiredReplicas
        newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime

        if err := r.Client.Update(ctx, newestSet); err != nil {
        if err := r.Update(ctx, newestSet); err != nil {
            log.Error(err, "Failed to update runnerreplicaset resource")

            return ctrl.Result{}, err

@ -257,7 +257,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
            updated := rs.DeepCopy()
            zero := 0
            updated.Spec.Replicas = &zero
            if err := r.Client.Update(ctx, updated); err != nil {
            if err := r.Update(ctx, updated); err != nil {
                rslog.Error(err, "Failed to scale runnerreplicaset to zero")

                return ctrl.Result{}, err

@ -268,7 +268,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
            continue
        }

        if err := r.Client.Delete(ctx, &rs); err != nil {
        if err := r.Delete(ctx, &rs); err != nil {
            rslog.Error(err, "Failed to delete runnerreplicaset resource")

            return ctrl.Result{}, err

@ -445,10 +445,10 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
    templateHash := ComputeHash(&newRSTemplate)

    // Add template hash label to selector.
    newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
    newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerTemplateHash, templateHash)

    // This label selector is used by default when rd.Spec.Selector is empty.
    newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
    newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerDeploymentName, rd.Name)

    selector := getSelector(rd)

@ -457,9 +457,9 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
    rs := v1alpha1.RunnerReplicaSet{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: rd.ObjectMeta.Name + "-",
            Namespace:    rd.ObjectMeta.Namespace,
            Labels:       newRSTemplate.ObjectMeta.Labels,
            GenerateName: rd.Name + "-",
            Namespace:    rd.Namespace,
            Labels:       newRSTemplate.Labels,
        },
        Spec: v1alpha1.RunnerReplicaSetSpec{
            Replicas: rd.Spec.Replicas,
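The r.Client.Create → r.Create edits above are the same promotion rule applied to an embedded interface: the reconciler structs embed controller-runtime's client.Client, so its methods are callable directly on the reconciler. A self-contained sketch under that assumption, with a stand-in Writer interface in place of the real sigs.k8s.io/controller-runtime/pkg/client.Client:

    package main

    import "fmt"

    // Writer stands in for client.Client; embedding an interface promotes
    // its entire method set onto the embedding struct.
    type Writer interface {
        Create(name string) error
    }

    type fakeWriter struct{}

    func (fakeWriter) Create(name string) error {
        fmt.Println("created", name)
        return nil
    }

    type Reconciler struct {
        Writer // embedded, like client.Client in the reconcilers above
    }

    func main() {
        r := Reconciler{Writer: fakeWriter{}}
        _ = r.Writer.Create("rs-1") // explicit selector through the field
        _ = r.Create("rs-2")        // promoted method, the form after this commit
    }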
@ -62,7 +62,7 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
    if !rs.DeletionTimestamp.IsZero() {
        // RunnerReplicaSet cannot be gracefully removed.
        // That means any runner that is running a job can be prematurely terminated.
        // To gracefully remove a RunnerReplicaSet, scale it down to zero first, observe RunnerReplicaSet's status replicas,

@ -70,14 +70,14 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
        return ctrl.Result{}, nil
    }

    if rs.ObjectMeta.Labels == nil {
        rs.ObjectMeta.Labels = map[string]string{}
    if rs.Labels == nil {
        rs.Labels = map[string]string{}
    }

    // The template hash is usually set by the upstream controller (the RunnerDeployment controller) when authoring the
    // RunnerReplicaSet resource, but it may be missing when the user created the RunnerReplicaSet directly.
    // As a template hash is required by the runner replica management, we dynamically add it here without ever persisting it.
    if rs.ObjectMeta.Labels[LabelKeyRunnerTemplateHash] == "" {
    if rs.Labels[LabelKeyRunnerTemplateHash] == "" {
        template := rs.Spec.DeepCopy()
        template.Replicas = nil
        template.EffectiveTime = nil

@ -85,8 +85,8 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req

        log.Info("Using auto-generated template hash", "value", templateHash)

        rs.ObjectMeta.Labels = CloneAndAddLabel(rs.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
        rs.Spec.Template.ObjectMeta.Labels = CloneAndAddLabel(rs.Spec.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
        rs.Labels = CloneAndAddLabel(rs.Labels, LabelKeyRunnerTemplateHash, templateHash)
        rs.Spec.Template.Labels = CloneAndAddLabel(rs.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
    }

    selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)

@ -169,8 +169,8 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
    // the "runner template hash" label to the template.meta which is necessary to make this controller work correctly
    objectMeta := rs.Spec.Template.ObjectMeta.DeepCopy()

    objectMeta.GenerateName = rs.ObjectMeta.Name + "-"
    objectMeta.Namespace = rs.ObjectMeta.Namespace
    objectMeta.GenerateName = rs.Name + "-"
    objectMeta.Namespace = rs.Namespace
    if objectMeta.Annotations == nil {
        objectMeta.Annotations = map[string]string{}
    }
@ -77,7 +77,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
        return ctrl.Result{}, err
    }

    if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
    if !runnerSet.DeletionTimestamp.IsZero() {
        r.GitHubClient.DeinitForRunnerSet(runnerSet)

        return ctrl.Result{}, nil

@ -191,11 +191,11 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
    runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)

    template := corev1.Pod{
        ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
        Spec:       runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
        ObjectMeta: runnerSetWithOverrides.Template.ObjectMeta,
        Spec:       runnerSetWithOverrides.Template.Spec,
    }

    if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
    if runnerSet.Spec.ContainerMode == "kubernetes" {
        found := false
        for i := range template.Spec.Containers {
            if template.Spec.Containers[i].Name == containerName {

@ -208,7 +208,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
            })
        }

        workDir := runnerSet.Spec.RunnerConfig.WorkDir
        workDir := runnerSet.Spec.WorkDir
        if workDir == "" {
            workDir = "/runner/_work"
        }

@ -219,7 +219,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
        template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
    }

    template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
    template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunnerSetName, runnerSet.Name)

    ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
    if err != nil {

@ -228,38 +228,38 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a

    githubBaseURL := ghc.GithubBaseURL

    pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
    pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
    if err != nil {
        return nil, err
    }

    runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta = pod.ObjectMeta
    runnerSetWithOverrides.StatefulSetSpec.Template.Spec = pod.Spec
    runnerSetWithOverrides.Template.ObjectMeta = pod.ObjectMeta
    runnerSetWithOverrides.Template.Spec = pod.Spec
    // NOTE: Seems like the only supported restart policy for statefulset is "Always"?
    // I got errors like the below when I tried to use "OnFailure":
    // StatefulSet.apps \"example-runnersetpg9rx\" is invalid: [spec.template.metadata.labels: Invalid value: map[string]string{\"runner-template-hash\"
    // :\"85d7578bd6\", \"runnerset-name\":\"example-runnerset\"}: `selector` does not match template `labels`, spec.
    // template.spec.restartPolicy: Unsupported value: \"OnFailure\": supported values: \"Always\"]
    runnerSetWithOverrides.StatefulSetSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways
    runnerSetWithOverrides.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways

    templateHash := ComputeHash(pod.Spec)

    // Add template hash label to selector.
    runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
    runnerSetWithOverrides.Template.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)

    selector := getRunnerSetSelector(runnerSet)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerSetName, runnerSet.Name)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyPodMutation, LabelValuePodMutation)

    runnerSetWithOverrides.StatefulSetSpec.Selector = selector
    runnerSetWithOverrides.Selector = selector

    rs := appsv1.StatefulSet{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: runnerSet.ObjectMeta.Name + "-",
            Namespace:    runnerSet.ObjectMeta.Namespace,
            Labels:       CloneAndAddLabel(runnerSet.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash),
            GenerateName: runnerSet.Name + "-",
            Namespace:    runnerSet.Namespace,
            Labels:       CloneAndAddLabel(runnerSet.Labels, LabelKeyRunnerTemplateHash, templateHash),
            Annotations: map[string]string{
                SyncTimeAnnotationKey: time.Now().Format(time.RFC3339),
            },
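The RunnerSet hunks above shorten selectors through two different embedded fields at once: RunnerSetSpec appears to embed both a runner config and a StatefulSet spec, so spec.RunnerConfig.ContainerMode and spec.StatefulSetSpec.Template collapse to spec.ContainerMode and spec.Template. Promotion from multiple embedded structs works as long as the promoted names don't collide between them. A sketch with simplified stand-in types (field sets here are invented for illustration):

    package main

    import "fmt"

    type RunnerConfig struct{ ContainerMode, WorkDir string }
    type StatefulSetSpec struct{ Replicas int }

    // Spec mirrors the shape of v1alpha1.RunnerSetSpec: both embedded types
    // contribute promoted fields, reachable without naming the embedded type.
    type Spec struct {
        RunnerConfig
        StatefulSetSpec
    }

    func main() {
        s := Spec{
            RunnerConfig:    RunnerConfig{ContainerMode: "kubernetes", WorkDir: "/runner/_work"},
            StatefulSetSpec: StatefulSetSpec{Replicas: 3},
        }
        fmt.Println(s.RunnerConfig.ContainerMode) // explicit selector
        fmt.Println(s.ContainerMode, s.Replicas)  // promoted from both embedded structs
    }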
@ -23,7 +23,7 @@ const (
func syncVolumes(ctx context.Context, c client.Client, log logr.Logger, ns string, runnerSet *v1alpha1.RunnerSet, statefulsets []appsv1.StatefulSet) (*ctrl.Result, error) {
    log = log.WithValues("ns", ns)

    for _, t := range runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates {
    for _, t := range runnerSet.Spec.VolumeClaimTemplates {
        for _, sts := range statefulsets {
            pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)
@ -16,7 +16,7 @@ type testResourceReader struct {
}

func (r *testResourceReader) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
    nsName := types.NamespacedName{Namespace: key.Namespace, Name: key.Name}
    nsName := types.NamespacedName(key)
    ret, ok := r.objects[nsName]
    if !ok {
        return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
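Here a field-by-field copy becomes a plain type conversion (likely gosimple's S1016). It is valid because client.ObjectKey is, in recent controller-runtime versions, a type alias of types.NamespacedName, and the same conversion would be legal even between distinct struct types with identical fields, as in this stand-alone sketch with hypothetical types:

    package main

    import "fmt"

    type ObjectKey struct{ Namespace, Name string }
    type NamespacedName struct{ Namespace, Name string }

    func main() {
        key := ObjectKey{Namespace: "default", Name: "runner-1"}

        // Field-by-field copy, as the test read before this commit:
        verbose := NamespacedName{Namespace: key.Namespace, Name: key.Name}

        // Direct conversion: legal because both structs have identical
        // field names and types.
        short := NamespacedName(key)

        fmt.Println(verbose == short) // true
    }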
@ -64,22 +64,22 @@ func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
        t.Errorf("want name %q, got %q\n", want.Name, got.Name)
    }

    if got.VolumeSource.Ephemeral == nil {
    if got.Ephemeral == nil {
        t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
    }

    if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
    if got.Ephemeral.VolumeClaimTemplate == nil {
        t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
    }

    gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
    wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
    gotClassName := *got.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
    wantClassName := *want.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
    if gotClassName != wantClassName {
        t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
    }

    gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
    wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
    gotAccessModes := got.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
    wantAccessModes := want.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
    if len(gotAccessModes) != len(wantAccessModes) {
        t.Fatalf("access modes lengths mismatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
    }
@ -54,7 +54,7 @@ func TestAcquireJobs(t *testing.T) {
        RunnerScaleSet:          &actions.RunnerScaleSet{Id: 1},
        MessageQueueAccessToken: "abc",
    }
    var requestIDs []int64 = []int64{1}
    var requestIDs = []int64{1}

    retryMax := 1
    actualRetry := 0
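This hunk and several that follow drop a redundant type from var declarations: once an initializer is present, the compiler infers the variable's type, so spelling it out is flagged (stylecheck's ST1023, if memory serves). A quick sketch of the equivalent forms:

    package main

    import "fmt"

    func main() {
        // Before: the type is written twice, once explicitly and once
        // implied by the initializer.
        var a []int64 = []int64{1}

        // After: the initializer alone determines the type.
        var b = []int64{1}

        // Inside a function, the short declaration is the most idiomatic form.
        c := []int64{1}

        fmt.Println(a, b, c)
    }

Note that declarations like var runnerID int64 = 1 are left alone in these hunks: there the explicit type is not redundant, since the untyped constant 1 would otherwise default to int.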
@ -67,7 +67,7 @@ func TestGetRunnerByName(t *testing.T) {

    t.Run("Get Runner by Name", func(t *testing.T) {
        var runnerID int64 = 1
        var runnerName string = "self-hosted-ubuntu"
        var runnerName = "self-hosted-ubuntu"
        want := &actions.RunnerReference{
            Id:   int(runnerID),
            Name: runnerName,

@ -87,7 +87,7 @@ func TestGetRunnerByName(t *testing.T) {
    })

    t.Run("Get Runner by name with not exist runner", func(t *testing.T) {
        var runnerName string = "self-hosted-ubuntu"
        var runnerName = "self-hosted-ubuntu"
        response := []byte(`{"count": 0, "value": []}`)

        server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@ -103,7 +103,7 @@ func TestGetRunnerByName(t *testing.T) {
    })

    t.Run("Default retries on server error", func(t *testing.T) {
        var runnerName string = "self-hosted-ubuntu"
        var runnerName = "self-hosted-ubuntu"

        retryWaitMax := 1 * time.Millisecond
        retryMax := 1

@ -181,7 +181,7 @@ func TestGetRunnerGroupByName(t *testing.T) {

    t.Run("Get RunnerGroup by Name", func(t *testing.T) {
        var runnerGroupID int64 = 1
        var runnerGroupName string = "test-runner-group"
        var runnerGroupName = "test-runner-group"
        want := &actions.RunnerGroup{
            ID:   runnerGroupID,
            Name: runnerGroupName,

@ -201,7 +201,7 @@ func TestGetRunnerGroupByName(t *testing.T) {
    })

    t.Run("Get RunnerGroup by name with not exist runner group", func(t *testing.T) {
        var runnerGroupName string = "test-runner-group"
        var runnerGroupName = "test-runner-group"
        response := []byte(`{"count": 0, "value": []}`)

        server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@ -127,7 +127,7 @@ func NewServer(opts ...Option) *httptest.Server {
        },

        // For ListRunners
        "/repos/test/valid/actions/runners": config.FixedResponses.ListRunners,
        "/repos/test/valid/actions/runners": config.ListRunners,
        "/repos/test/invalid/actions/runners": &Handler{
            Status: http.StatusNoContent,
            Body:   "",

@ -204,10 +204,10 @@ func NewServer(opts ...Option) *httptest.Server {
        },

        // For auto-scaling based on the number of queued(pending) workflow runs
        "/repos/test/valid/actions/runs": config.FixedResponses.ListRepositoryWorkflowRuns,
        "/repos/test/valid/actions/runs": config.ListRepositoryWorkflowRuns,

        // For auto-scaling based on the number of queued(pending) workflow jobs
        "/repos/test/valid/actions/runs/": config.FixedResponses.ListWorkflowJobs,
        "/repos/test/valid/actions/runs/": config.ListWorkflowJobs,
    }

    mux := http.NewServeMux()
@ -12,7 +12,7 @@ type Option func(*ServerConfig)

func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
        c.ListRepositoryWorkflowRuns = &Handler{
            Status: status,
            Body:   body,
            Statuses: map[string]string{

@ -25,7 +25,7 @@ func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progres

func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListWorkflowJobs = &MapHandler{
        c.ListWorkflowJobs = &MapHandler{
            Status: status,
            Bodies: bodies,
        }

@ -34,7 +34,7 @@ func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {

func WithListRunnersResponse(status int, body string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListRunners = &ListRunnersHandler{
        c.ListRunners = &ListRunnersHandler{
            Status: status,
            Body:   body,
        }
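The With...Response helpers above follow Go's functional-options pattern: each helper returns a closure that mutates one field of the server config, and the fake server applies whatever options a test passes in. A condensed, runnable sketch of the pattern under simplified assumptions (the real NewServer builds an httptest.Server, and its ServerConfig carries many more handlers than shown here):

    package main

    import "fmt"

    type Handler struct {
        Status int
        Body   string
    }

    type ServerConfig struct {
        ListRunners *Handler
    }

    // Option is a closure that tweaks one piece of the config.
    type Option func(*ServerConfig)

    func WithListRunnersResponse(status int, body string) Option {
        return func(c *ServerConfig) {
            c.ListRunners = &Handler{Status: status, Body: body}
        }
    }

    func NewServer(opts ...Option) *ServerConfig {
        c := &ServerConfig{}
        for _, opt := range opts {
            opt(c) // apply each option in order
        }
        return c
    }

    func main() {
        cfg := NewServer(WithListRunnersResponse(200, `{"runners": []}`))
        fmt.Println(cfg.ListRunners.Status, cfg.ListRunners.Body)
    }

The pattern keeps the constructor signature stable as new knobs are added, which is why the tests can opt into exactly the fixed responses they need.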
@ -290,7 +290,7 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri

    opts := github.ListOptions{PerPage: 100}
    for {
        list, res, err := c.Client.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
        list, res, err := c.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
        if err != nil {
            return nil, fmt.Errorf("failed to list repository access for runner group: %w", err)
        }

@ -323,32 +323,32 @@ func (c *Client) cleanup() {

func (c *Client) createRegistrationToken(ctx context.Context, enterprise, org, repo string) (*github.RegistrationToken, *github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.CreateRegistrationToken(ctx, org, repo)
        return c.Actions.CreateRegistrationToken(ctx, org, repo)
    }
    if len(org) > 0 {
        return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, org)
        return c.Actions.CreateOrganizationRegistrationToken(ctx, org)
    }
    return c.Client.Enterprise.CreateRegistrationToken(ctx, enterprise)
    return c.Enterprise.CreateRegistrationToken(ctx, enterprise)
}

func (c *Client) removeRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) (*github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.RemoveRunner(ctx, org, repo, runnerID)
        return c.Actions.RemoveRunner(ctx, org, repo, runnerID)
    }
    if len(org) > 0 {
        return c.Client.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
        return c.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
    }
    return c.Client.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
    return c.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
}

func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.ListRunners(ctx, org, repo, opts)
        return c.Actions.ListRunners(ctx, org, repo, opts)
    }
    if len(org) > 0 {
        return c.Client.Actions.ListOrganizationRunners(ctx, org, opts)
        return c.Actions.ListOrganizationRunners(ctx, org, opts)
    }
    return c.Client.Enterprise.ListRunners(ctx, enterprise, opts)
    return c.Enterprise.ListRunners(ctx, enterprise, opts)
}

func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {

@ -381,7 +381,7 @@ func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, re
    }

    for {
        list, res, err := c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
        list, res, err := c.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)

        if err != nil {
            return workflowRuns, fmt.Errorf("failed to list workflow runs: %v", err)
@ -26,7 +26,7 @@ func newTestClient() *Client {
    if err != nil {
        panic(err)
    }
    client.Client.BaseURL = baseURL
    client.BaseURL = baseURL

    return client
}
@ -598,9 +598,9 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
    }

        e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
        e.Env.Kubeconfig = e.Kind.Kubeconfig()
        e.Kubeconfig = e.Kind.Kubeconfig()
    } else {
        e.Env.Kubeconfig = e.remoteKubeconfig
        e.Kubeconfig = e.remoteKubeconfig

        // Kind automatically installs https://github.com/rancher/local-path-provisioner for PVs.
        // But assuming the remote cluster isn't a kind Kubernetes cluster,