Remove legacy GitHub API cache of HRA.Status.CacheEntries (#1192)
* Remove legacy GitHub API cache of HRA.Status.CacheEntries

  We migrated to the transport-level cache introduced in #1127, so the legacy cache is not only useless, it also makes it harder to deduce which cache produced the desired replicas number calculated by the HRA. Just remove the legacy cache to keep the code simple and easy to understand.

* Deprecate the githubAPICacheDuration helm chart value and the --github-api-cache-duration flag as well

* Fix integration test
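For context, here is a minimal sketch of the transport-level approach that replaces the status-based cache, assuming the cache from #1127 is an RFC 7234 HTTP response cache in the spirit of github.com/gregjones/httpcache. The org/repo names and the exact wiring inside the controller are illustrative only, not taken from this commit:

package main // standalone illustration, not part of this commit

import (
    "context"
    "fmt"

    "github.com/google/go-github/v39/github"
    "github.com/gregjones/httpcache"
)

func main() {
    // Wrap the HTTP transport with an in-memory response cache so that repeated
    // GitHub API calls are answered locally while the cached response is still
    // fresh, instead of persisting computed values in the HRA status.
    tr := httpcache.NewMemoryCacheTransport()
    client := github.NewClient(tr.Client()) // authentication omitted for brevity

    // "example-org" and "example-repo" are placeholders.
    runners, _, err := client.Actions.ListRunners(context.Background(), "example-org", "example-repo", nil)
    if err != nil {
        panic(err)
    }
    fmt.Println("total runners:", runners.TotalCount)
}

With caching handled beneath the go-github client, freshness is governed by HTTP cache headers, which is why the CacheEntries bookkeeping in the hunks below can be deleted wholesale.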
This commit is contained in:
parent 301439b06a
commit 55ff4de79a
@@ -1,5 +1,4 @@
# Set actions-runner-controller settings for testing
-githubAPICacheDuration: 10s
logLevel: "-3"
githubWebhookServer:
  logLevel: "-3"
@@ -13,6 +13,7 @@ enableLeaderElection: true
# Must be unique if more than one controller installed onto the same namespace.
#leaderElectionId: "actions-runner-controller"

+# DEPRECATED: This has been removed as unnecessary in #1192
# The controller tries its best not to repeat the duplicate GitHub API call
# within this duration.
# Defaults to syncPeriod - 10s.
@@ -7,7 +7,6 @@ import (
    "math"
    "strconv"
    "strings"
-   "time"

    "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
    "github.com/google/go-github/v39/github"

@@ -20,47 +19,6 @@ const (
    defaultScaleDownFactor = 0.7
)

-func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
-   if to != nil && now.After(*to) {
-       return nil
-   }
-
-   if from != nil && now.Before(*from) {
-       return nil
-   }
-
-   return &reservedValue
-}
-
-func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
-   var entry *v1alpha1.CacheEntry
-
-   for i := range hra.Status.CacheEntries {
-       ent := hra.Status.CacheEntries[i]
-
-       if ent.Key != v1alpha1.CacheEntryKeyDesiredReplicas {
-           continue
-       }
-
-       if !time.Now().Before(ent.ExpirationTime.Time) {
-           continue
-       }
-
-       entry = &ent
-
-       break
-   }
-
-   if entry != nil {
-       v := getValueAvailableAt(time.Now(), nil, &entry.ExpirationTime.Time, entry.Value)
-       if v != nil {
-           return v
-       }
-   }
-
-   return nil
-}
-
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
    if hra.Spec.MinReplicas == nil {
        return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
@@ -234,7 +234,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

            st := h.scaleTargetFromRD(context.Background(), rd)

-           got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
+           got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
            if err != nil {
                if tc.err == "" {
                    t.Fatalf("unexpected error: expected none, got %v", err)
@@ -502,7 +502,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {

            st := h.scaleTargetFromRD(context.Background(), rd)

-           got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
+           got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
            if err != nil {
                if tc.err == "" {
                    t.Fatalf("unexpected error: expected none, got %v", err)
@@ -307,7 +307,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
        return ctrl.Result{}, err
    }

-   newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
+   newDesiredReplicas, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
    if err != nil {
        r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
@@ -332,24 +332,6 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
        updated.Status.DesiredReplicas = &newDesiredReplicas
    }

-   if computedReplicasFromCache == nil {
-       cacheEntries := getValidCacheEntries(updated, now)
-
-       var cacheDuration time.Duration
-
-       if r.CacheDuration > 0 {
-           cacheDuration = r.CacheDuration
-       } else {
-           cacheDuration = 10 * time.Minute
-       }
-
-       updated.Status.CacheEntries = append(cacheEntries, v1alpha1.CacheEntry{
-           Key: v1alpha1.CacheEntryKeyDesiredReplicas,
-           Value: computedReplicas,
-           ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
-       })
-   }
-
    var overridesSummary string

    if (active != nil && upcoming == nil) || (active != nil && upcoming != nil && active.Period.EndTime.Before(upcoming.Period.StartTime)) {
@@ -384,18 +366,6 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
    return ctrl.Result{}, nil
}

-func getValidCacheEntries(hra *v1alpha1.HorizontalRunnerAutoscaler, now time.Time) []v1alpha1.CacheEntry {
-   var cacheEntries []v1alpha1.CacheEntry
-
-   for _, ent := range hra.Status.CacheEntries {
-       if ent.ExpirationTime.After(now) {
-           cacheEntries = append(cacheEntries, ent)
-       }
-   }
-
-   return cacheEntries
-}
-
func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
    name := "horizontalrunnerautoscaler-controller"
    if r.Name != "" {
@@ -488,32 +458,18 @@ func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, n
    return minReplicas, active, upcoming, nil
}

-func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, int, *int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
    var suggestedReplicas int

-   suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
-
-   var cached *int
-
-   if suggestedReplicasFromCache != nil {
-       cached = suggestedReplicasFromCache
-
-       if cached == nil {
-           suggestedReplicas = minReplicas
-       } else {
-           suggestedReplicas = *cached
-       }
-   } else {
-       v, err := r.suggestDesiredReplicas(st, hra)
-       if err != nil {
-           return 0, 0, nil, err
-       }
-
-       if v == nil {
-           suggestedReplicas = minReplicas
-       } else {
-           suggestedReplicas = *v
-       }
+   v, err := r.suggestDesiredReplicas(st, hra)
+   if err != nil {
+       return 0, err
+   }
+
+   if v == nil {
+       suggestedReplicas = minReplicas
+   } else {
+       suggestedReplicas = *v
    }

    var reserved int
@@ -576,10 +532,6 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr
        kvs = append(kvs, "max", *maxReplicas)
    }

-   if cached != nil {
-       kvs = append(kvs, "cached", *cached)
-   }
-
    if scaleDownDelayUntil != nil {
        kvs = append(kvs, "last_scale_up_time", *hra.Status.LastSuccessfulScaleOutTime)
        kvs = append(kvs, "scale_down_delay_until", scaleDownDelayUntil)
@@ -589,5 +541,5 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr
        kvs...,
    )

-   return newDesiredReplicas, suggestedReplicas, suggestedReplicasFromCache, nil
+   return newDesiredReplicas, nil
}
@@ -1,50 +0,0 @@
-package controllers
-
-import (
-   "testing"
-   "time"
-
-   actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-   "github.com/google/go-cmp/cmp"
-   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestGetValidCacheEntries(t *testing.T) {
-   now := time.Now()
-
-   hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
-       Status: actionsv1alpha1.HorizontalRunnerAutoscalerStatus{
-           CacheEntries: []actionsv1alpha1.CacheEntry{
-               {
-                   Key: "foo",
-                   Value: 1,
-                   ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
-               },
-               {
-                   Key: "foo",
-                   Value: 2,
-                   ExpirationTime: metav1.Time{Time: now},
-               },
-               {
-                   Key: "foo",
-                   Value: 3,
-                   ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
-               },
-           },
-       },
-   }
-
-   revs := getValidCacheEntries(hra, now)
-
-   counts := map[string]int{}
-
-   for _, r := range revs {
-       counts[r.Key] += r.Value
-   }
-
-   want := map[string]int{"foo": 3}
-
-   if d := cmp.Diff(want, counts); d != "" {
-       t.Errorf("%s", d)
-   }
-}
@@ -270,7 +270,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

            ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
            ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2)
-           ExpectHRAStatusCacheEntryLengthEventuallyEquals(ctx, ns.Name, name, 1)
        }

        {
@@ -373,7 +372,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

            ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
            ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
-           ExpectHRAStatusCacheEntryLengthEventuallyEquals(ctx, ns.Name, name, 1)
        }

        {
@@ -1245,21 +1243,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
    })
})

-func ExpectHRAStatusCacheEntryLengthEventuallyEquals(ctx context.Context, ns string, name string, value int, optionalDescriptions ...interface{}) {
-   EventuallyWithOffset(
-       1,
-       func() int {
-           var hra actionsv1alpha1.HorizontalRunnerAutoscaler
-
-           err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: name}, &hra)
-
-           ExpectWithOffset(1, err).NotTo(HaveOccurred(), "failed to get test HRA resource")
-
-           return len(hra.Status.CacheEntries)
-       },
-       time.Second*5, time.Millisecond*500).Should(Equal(value), optionalDescriptions...)
-}
-
func ExpectHRADesiredReplicasEquals(ctx context.Context, ns, name string, desired int, optionalDescriptions ...interface{}) {
    var rd actionsv1alpha1.HorizontalRunnerAutoscaler
main.go
@@ -110,8 +110,8 @@ func main() {
    flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
    flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
    flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
-   flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "The duration until the GitHub API cache expires. Setting this to e.g. 10m results in the controller tries its best not to make the same API call within 10m to reduce the chance of being rate-limited. Defaults to mostly the same value as sync-period. If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak sync-period, too")
-   flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change. . If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak github-api-cache-duration, too")
+   flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "DEPRECATED: The duration until the GitHub API cache expires. Setting this to e.g. 10m results in the controller tries its best not to make the same API call within 10m to reduce the chance of being rate-limited. Defaults to mostly the same value as sync-period. If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak sync-period, too")
+   flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change.")
    flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions-runner-controller/actions-runner-controller/issues/321 for more information")
    flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
    flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
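The hunk above only marks the flag's help text as DEPRECATED; nothing in the controller reads gitHubAPICacheDuration anymore. As a purely hypothetical sketch, not taken from this commit or the repository, this is how a deprecated-but-still-accepted flag can warn operators who continue to set it:

package main // hypothetical, standalone illustration

import (
    "flag"
    "log"
    "time"
)

func main() {
    var gitHubAPICacheDuration time.Duration

    // Keep accepting the deprecated flag so existing deployments don't break on upgrade.
    flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "DEPRECATED: ignored; kept for backward compatibility")
    flag.Parse()

    // Warn when the deprecated flag is still set; its value has no effect now that
    // caching happens at the HTTP transport level.
    if gitHubAPICacheDuration > 0 {
        log.Println("WARNING: --github-api-cache-duration is deprecated and has no effect")
    }
}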