Do include Runner controller in integration test (#409)

So that we can catch bugs in the runner controller like those seen in #398, #404, and #407. Ref #400

parent 3a0332dfdc
commit 07f822bb08
@@ -71,7 +71,9 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
     err := k8sClient.Create(ctx, ns)
     Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
 
-    mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
+    mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+        Namespace: ns.Name,
+    })
     Expect(err).NotTo(HaveOccurred(), "failed to create manager")
 
     responses := &fake.FixedResponses{}
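Every test helper's manager becomes namespace-scoped here. Without `Namespace`, each manager's cache watches cluster-wide, so with several Ginkgo specs each running their own manager against the shared envtest API server, a controller from one spec could reconcile objects belonging to another. A minimal sketch of the pattern, assuming `cfg` comes from envtest as in this suite (`newTestManager` is a hypothetical helper, not part of this commit):

```go
package controllers_test

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

// newTestManager shows the pattern the diff introduces: the manager's cache,
// and therefore every watch its controllers start, is confined to the
// per-test namespace.
func newTestManager(cfg *rest.Config, ns *corev1.Namespace) (ctrl.Manager, error) {
	return ctrl.NewManager(cfg, ctrl.Options{
		// Namespace scopes the shared informer cache to one namespace in
		// controller-runtime of this era; later versions moved this knob
		// into Options.Cache.
		Namespace: ns.Name,
	})
}
```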
@@ -97,6 +99,21 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
         return fmt.Sprintf("%s%s", ns.Name, name)
     }
 
+    runnerController := &RunnerReconciler{
+        Client:                      mgr.GetClient(),
+        Scheme:                      scheme.Scheme,
+        Log:                         logf.Log,
+        Recorder:                    mgr.GetEventRecorderFor("runnerreplicaset-controller"),
+        GitHubClient:                env.ghClient,
+        RunnerImage:                 "example/runner:test",
+        DockerImage:                 "example/docker:test",
+        Name:                        controllerName("runner"),
+        RegistrationRecheckInterval: time.Millisecond,
+        RegistrationRecheckJitter:   time.Millisecond,
+    }
+    err = runnerController.SetupWithManager(mgr)
+    Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
+
     replicasetController := &RunnerReplicaSetReconciler{
         Client: mgr.GetClient(),
         Scheme: scheme.Scheme,
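This is the core of the fix: the integration suite previously wired up the RunnerDeployment, RunnerReplicaSet, HorizontalRunnerAutoscaler, and webhook controllers but never the Runner controller itself, which is where the bugs in #398, #404, and #407 lived. The millisecond `RegistrationRecheckInterval` and `RegistrationRecheckJitter` keep the runner registration loop far below the 10-second Gomega timeouts used by the assertions further down. Note also `controllerName`, visible in the leading context lines:

```go
// From the context above: controller names are prefixed with the per-test
// namespace, presumably so that the manager each spec creates can register
// its controllers without name collisions against other specs running in
// the same test process.
controllerName := func(name string) string {
	return fmt.Sprintf("%s%s", ns.Name, name)
}
```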
@@ -106,7 +123,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
         Name: controllerName("runnerreplicaset"),
     }
     err = replicasetController.SetupWithManager(mgr)
-    Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+    Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
 
     deploymentsController := &RunnerDeploymentReconciler{
         Client: mgr.GetClient(),
@@ -116,7 +133,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
         Name: controllerName("runnnerdeployment"),
     }
     err = deploymentsController.SetupWithManager(mgr)
-    Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+    Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")
 
     autoscalerController := &HorizontalRunnerAutoscalerReconciler{
         Client: mgr.GetClient(),
@@ -128,7 +145,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
         Name: controllerName("horizontalrunnerautoscaler"),
     }
     err = autoscalerController.SetupWithManager(mgr)
-    Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+    Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")
 
     autoscalerWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{
         Client: mgr.GetClient(),
@@ -475,10 +492,9 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 
             ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
             ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
         }
 
         {
             env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
             env.SyncRunnerRegistrations()
             ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 3)
         }
 
         // Scale-up to 4 replicas on first check_run create webhook event
@@ -486,19 +502,19 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
             env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
             ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
             ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
         }
 
         {
             env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
             env.SyncRunnerRegistrations()
             ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 4)
         }
 
         // Scale-up to 5 replicas on second check_run create webhook event
         {
             env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
             ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
             env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
             env.SyncRunnerRegistrations()
             ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 5)
         }
 
         env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
     })
 
     It("should create and scale organization's repository runners only on check_run event", func() {
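Each scaling step is now asserted at three layers: the desired replica count on the RunnerReplicaSet (`ExpectRunnerSetsManagedReplicasCountEventuallyEquals`), the fake GitHub API's view of registered runners (`env.ExpectRegisteredNumberCountEventuallyEquals`, with `env.SyncRunnerRegistrations` presumably mirroring the created Runner resources into the fake registration list), and, new in this commit, the actual Runner resources reaching Running (`ExpectRunnerCountEventuallyEquals`, defined below). All of these helpers share one Gomega polling shape; a hypothetical restatement, assuming the suite's dot-imported Gomega and its shared `k8sClient`:

```go
// Sketch of the suite's polling helpers (the real ones differ in what they
// count): poll the namespace every 500ms for up to 10s until the computed
// value matches the expectation.
func ExpectRunnerSetsCountEventuallyEqualsSketch(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
	EventuallyWithOffset(
		1,
		func() int {
			var rs actionsv1alpha1.RunnerReplicaSetList
			if err := k8sClient.List(ctx, &rs, client.InNamespace(ns)); err != nil {
				logf.Log.Error(err, "list runner sets")
			}
			return len(rs.Items)
		},
		time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
}
```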
@@ -1228,6 +1244,44 @@ func ExpectRunnerSetsCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
         time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
 }
 
+func ExpectRunnerCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
+    runners := actionsv1alpha1.RunnerList{Items: []actionsv1alpha1.Runner{}}
+
+    EventuallyWithOffset(
+        1,
+        func() int {
+            err := k8sClient.List(ctx, &runners, client.InNamespace(ns))
+            if err != nil {
+                logf.Log.Error(err, "list runner sets")
+            }
+
+            var running int
+
+            for _, r := range runners.Items {
+                if r.Status.Phase == string(corev1.PodRunning) {
+                    running++
+                } else {
+                    var pod corev1.Pod
+                    if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: r.Name}, &pod); err != nil {
+                        logf.Log.Error(err, "simulating pod controller")
+                        continue
+                    }
+
+                    copy := pod.DeepCopy()
+                    copy.Status.Phase = corev1.PodRunning
+
+                    if err := k8sClient.Status().Patch(ctx, copy, client.MergeFrom(&pod)); err != nil {
+                        logf.Log.Error(err, "simulating pod controller")
+                        continue
+                    }
+                }
+            }
+
+            return running
+        },
+        time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
+}
+
 func ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
     runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
 
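The new helper does double duty. envtest runs only kube-apiserver and etcd; there is no kubelet, so no pod would ever reach the Running phase on its own and the Runner controller would wait on registration forever. The "simulating pod controller" branch therefore counts a runner as running only after force-patching its pod's status subresource. The core trick as a standalone sketch (`markPodRunning` is a hypothetical name; imports are `k8s.io/api/core/v1` and `sigs.k8s.io/controller-runtime/pkg/client`):

```go
// Flip a Pod to Running the way the helper above does: envtest's API server
// accepts a status-subresource patch just as if a kubelet had reported it.
func markPodRunning(ctx context.Context, c client.Client, pod corev1.Pod) error {
	patched := pod.DeepCopy()
	patched.Status.Phase = corev1.PodRunning
	return c.Status().Patch(ctx, patched, client.MergeFrom(&pod))
}
```

The hunks that follow leave the integration test and change the Runner controller itself.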
@@ -52,12 +52,15 @@ const (
 // RunnerReconciler reconciles a Runner object
 type RunnerReconciler struct {
     client.Client
-    Log          logr.Logger
-    Recorder     record.EventRecorder
-    Scheme       *runtime.Scheme
-    GitHubClient *github.Client
-    RunnerImage  string
-    DockerImage  string
+    Log                         logr.Logger
+    Recorder                    record.EventRecorder
+    Scheme                      *runtime.Scheme
+    GitHubClient                *github.Client
+    RunnerImage                 string
+    DockerImage                 string
+    Name                        string
+    RegistrationRecheckInterval time.Duration
+    RegistrationRecheckJitter   time.Duration
 }
 
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -248,6 +251,9 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
     // saving API calls and scary log messages
     if !restart {
         registrationCheckInterval := time.Minute
+        if r.RegistrationRecheckInterval > 0 {
+            registrationCheckInterval = r.RegistrationRecheckInterval
+        }
 
         // We want to call ListRunners GitHub Actions API only once per runner per minute.
         // This if block, in conjunction with:
@@ -364,7 +370,12 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
     }
 
     if (notFound || offline) && !registrationDidTimeout {
-        registrationRecheckDelay = registrationCheckInterval + wait.Jitter(10*time.Second, 0.1)
+        registrationRecheckJitter := 10 * time.Second
+        if r.RegistrationRecheckJitter > 0 {
+            registrationRecheckJitter = r.RegistrationRecheckJitter
+        }
+
+        registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
     }
 }
 
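With both knobs set to `time.Millisecond` in the integration test, the recheck delay collapses from roughly a minute plus ten-odd seconds to a couple of milliseconds. For reference, `wait.Jitter` from k8s.io/apimachinery returns its input plus a random fraction of it, so the computation above can be read as:

```go
package controllers

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// recheckDelay is a hypothetical helper restating the computation above.
// wait.Jitter(d, f) returns d + rand.Float64()*f*d, i.e. a duration in
// [d, d*(1+f)): roughly 60s + 10-11s in production, ~2ms in the test.
func recheckDelay(interval, jitter time.Duration) time.Duration {
	return interval + wait.Jitter(jitter, 0.1)
}
```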
@@ -778,6 +789,11 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 
 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+    name := "runner-controller"
+    if r.Name != "" {
+        name = r.Name
+    }
+
     r.Recorder = mgr.GetEventRecorderFor(name)
 
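The hard-coded recorder name gives way to the injectable `Name`, falling back to the old default. The rest of `SetupWithManager` is outside the hunk, but in kubebuilder-style projects the same name typically also feeds the controller builder, which is what would make the integration test's namespace-prefixed names necessary; a hedged sketch of that continuation (the actual watched/owned types are not shown in this diff):

```go
	// Hypothetical tail of SetupWithManager above.
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.Runner{}).
		Owns(&corev1.Pod{}).
		Named(name).
		Complete(r)
}
```

The remaining hunks update the sibling controller test suites and the shared envtest bootstrap.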
@@ -139,7 +139,9 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
     err := k8sClient.Create(ctx, ns)
     Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
 
-    mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
+    mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+        Namespace: ns.Name,
+    })
     Expect(err).NotTo(HaveOccurred(), "failed to create manager")
 
     controller := &RunnerDeploymentReconciler{
@@ -199,7 +201,7 @@ var _ = Context("Inside of a new namespace", func() {
             },
         },
         Spec: actionsv1alpha1.RunnerSpec{
-            Repository: "foo/bar",
+            Repository: "test/valid",
             Image:      "bar",
             Env: []corev1.EnvVar{
                 {Name: "FOO", Value: "FOOVALUE"},
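The placeholder repository foo/bar becomes test/valid throughout the fixtures, matching the owner/repo pair the integration test's webhook events use (`env.SendOrgCheckRunEvent("test", "valid", ...)`); presumably the fake GitHub client only serves fixtures for that repository, and with the Runner controller now actually reconciling these specs, an unrecognized repository would stall the registration checks.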
@@ -295,7 +297,7 @@ var _ = Context("Inside of a new namespace", func() {
         Replicas: intPtr(1),
         Template: actionsv1alpha1.RunnerTemplate{
             Spec: actionsv1alpha1.RunnerSpec{
-                Repository: "foo/bar",
+                Repository: "test/valid",
                 Image:      "bar",
                 Env: []corev1.EnvVar{
                     {Name: "FOO", Value: "FOOVALUE"},
@@ -391,7 +393,7 @@ var _ = Context("Inside of a new namespace", func() {
         Replicas: intPtr(1),
         Template: actionsv1alpha1.RunnerTemplate{
             Spec: actionsv1alpha1.RunnerSpec{
-                Repository: "foo/bar",
+                Repository: "test/valid",
                 Image:      "bar",
                 Env: []corev1.EnvVar{
                     {Name: "FOO", Value: "FOOVALUE"},
@@ -47,7 +47,9 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
     err := k8sClient.Create(ctx, ns)
     Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
 
-    mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
+    mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+        Namespace: ns.Name,
+    })
     Expect(err).NotTo(HaveOccurred(), "failed to create manager")
 
     runnersList = fake.NewRunnersList()
@@ -127,7 +129,7 @@ var _ = Context("Inside of a new namespace", func() {
             },
         },
         Spec: actionsv1alpha1.RunnerSpec{
-            Repository: "foo/bar",
+            Repository: "test/valid",
             Image:      "bar",
             Env: []corev1.EnvVar{
                 {Name: "FOO", Value: "FOOVALUE"},
@@ -55,9 +55,17 @@ func TestAPIs(t *testing.T) {
 var _ = BeforeSuite(func(done Done) {
     logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
 
+    var apiServerFlags []string
+
+    apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...)
+    // Avoids the following error:
+    //   2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
+    apiServerFlags = append(apiServerFlags, "--allow-privileged=true")
+
     By("bootstrapping test environment")
     testEnv = &envtest.Environment{
-        CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
+        CRDDirectoryPaths:  []string{filepath.Join("..", "config", "crd", "bases")},
+        KubeAPIServerFlags: apiServerFlags,
     }
 
     var err error
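With the Runner controller in play, the suite now actually creates runner pods, and per the error quoted in the comment those pods request a privileged container, presumably the docker-in-docker sidecar. envtest's kube-apiserver rejects privileged containers unless started with --allow-privileged=true, hence the extra flag appended to controller-runtime's DefaultKubeAPIServerFlags. The same quoted error complains that spec.containers[1].image is a required value, which is what the explicit RunnerImage and DockerImage wired into the integration test's RunnerReconciler appear to address.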