parent 041eb9a5ea
commit ed5a59655c

@@ -26,8 +26,8 @@ func TestConfiguration(t *testing.T) {
	t.Parallel()
	namespace, ctx := setupTest(t)
	defer ctx.Cleanup()
	defer showLogsAndCleanup(t, ctx)

	jenkinsCRName := "e2e"
	numberOfExecutors := 6
	numberOfExecutorsEnvName := "NUMBER_OF_EXECUTORS"

@@ -102,7 +102,7 @@ func TestPlugins(t *testing.T) {
	t.Parallel()
	namespace, ctx := setupTest(t)
	// Deletes test namespace
	defer ctx.Cleanup()
	defer showLogsAndCleanup(t, ctx)

	jobID := "k8s-e2e"

@@ -2,40 +2,49 @@ package e2e

import (
	"bytes"
	"fmt"
	"io"
	"sort"
	"testing"

	framework "github.com/operator-framework/operator-sdk/pkg/test"
	v1 "k8s.io/api/core/v1"
	"k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

var (
	podLogTailLimit int64 = 15
	podLogTailLimit       int64 = 15
	kubernetesEventsLimit int64 = 15
	// MUST match the labels in the deployment manifest: deploy/operator.yaml
	operatorPodLabels = map[string]string{
		"name": "jenkins-operator",
	}
)

func getOperatorPod(t *testing.T, namespace string) *v1.Pod {
func getOperatorPod(namespace string) (*v1.Pod, error) {
	listOptions := metav1.ListOptions{
		LabelSelector: labels.SelectorFromSet(operatorPodLabels).String(),
	}

	podList, err := framework.Global.KubeClient.CoreV1().Pods(namespace).List(listOptions)
	if err != nil {
		t.Fatal(err)
		return nil, err
	}
	if len(podList.Items) != 1 {
		t.Fatalf("Expected exactly one pod, got: '%+v'", podList)
		return nil, fmt.Errorf("expected exactly one pod, got: '%+v'", podList)
	}

	return &podList.Items[0]
	return &podList.Items[0], nil
}

func getOperatorLogs(pod v1.Pod) (string, error) {
func getOperatorLogs(namespace string) (string, error) {
	pod, err := getOperatorPod(namespace)
	if err != nil {
		return "", err
	}

	logOptions := v1.PodLogOptions{TailLines: &podLogTailLimit}
	req := framework.Global.KubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &logOptions)
	podLogs, err := req.Stream()
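
The rest of getOperatorLogs falls outside this hunk. Given the bytes and io imports above and the `return logs, nil` that opens the next hunk, the elided body presumably drains the log stream into a buffer roughly like this (a sketch under that assumption, not the exact committed code):

	if err != nil {
		return "", err
	}
	defer podLogs.Close() // assumption: the stream is closed once the tail has been read

	buf := new(bytes.Buffer)
	if _, err = io.Copy(buf, podLogs); err != nil { // copy the tailed log lines into memory
		return "", err
	}
	logs := buf.String()
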
@@ -59,14 +68,76 @@ func getOperatorLogs(pod v1.Pod) (string, error) {
	return logs, nil
}

func failTestAndPrintLogs(t *testing.T, namespace string, err error) {
	operatorPod := getOperatorPod(t, namespace)
	logs, logsErr := getOperatorLogs(*operatorPod)
	if logsErr != nil {
		t.Errorf("Couldn't get pod logs: %s", logsErr)
func printOperatorLogs(t *testing.T, namespace string) {
	t.Logf("Operator logs in '%s' namespace:\n", namespace)
	logs, err := getOperatorLogs(namespace)
	if err != nil {
		t.Errorf("Couldn't get the operator pod logs: %s", err)
	} else {
		t.Logf("Last %d lines of log from operator:\n %s", podLogTailLimit, logs)
	}

	t.Fatal(err)
}

func getKubernetesEvents(namespace string) ([]v1beta1.Event, error) {
	listOptions := metav1.ListOptions{
		Limit: kubernetesEventsLimit,
	}

	events, err := framework.Global.KubeClient.EventsV1beta1().Events(namespace).List(listOptions)
	if err != nil {
		return nil, err
	}

	sort.SliceStable(events.Items, func(i, j int) bool {
		return events.Items[i].CreationTimestamp.Unix() < events.Items[j].CreationTimestamp.Unix()
	})

	return events.Items, nil
}

func printKubernetesEvents(t *testing.T, namespace string) {
	t.Logf("Kubernetes events in '%s' namespace:\n", namespace)
	events, err := getKubernetesEvents(namespace)
	if err != nil {
		t.Errorf("Couldn't get kubernetes events: %s", err)
	} else {
		t.Logf("Last %d events from kubernetes:\n", kubernetesEventsLimit)

		for _, event := range events {
			t.Logf("%+v\n\n", event)
		}
	}
}

func getKubernetesPods(namespace string) (*v1.PodList, error) {
	return framework.Global.KubeClient.CoreV1().Pods(namespace).List(metav1.ListOptions{})
}

func printKubernetesPods(t *testing.T, namespace string) {
	t.Logf("All pods in '%s' namespace:\n", namespace)
	podList, err := getKubernetesPods(namespace)
	if err != nil {
		t.Errorf("Couldn't get kubernetes pods: %s", err)
		return // podList is nil on error; iterating it below would panic
	}

	for _, pod := range podList.Items {
		t.Logf("%+v\n\n", pod)
	}
}

func showLogsAndCleanup(t *testing.T, ctx *framework.TestCtx) {
	if t.Failed() {
		t.Log("Test failed. Below you can check the logs:")

		namespace, err := ctx.GetNamespace()
		if err != nil {
			t.Fatalf("Failed to get namespace: %s", err)
		}

		printOperatorLogs(t, namespace)
		printKubernetesEvents(t, namespace)
		printKubernetesPods(t, namespace)
	}

	ctx.Cleanup()
}

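For reference, the pattern the test hunks adopt is to defer showLogsAndCleanup right after setupTest, so a failing test dumps the operator logs, recent events, and pods before the namespace is torn down. A minimal sketch of that wiring (TestExample and its body are illustrative only, not part of this commit):

	func TestExample(t *testing.T) {
		t.Parallel()
		namespace, ctx := setupTest(t)   // creates the test namespace and registers it on ctx
		defer showLogsAndCleanup(t, ctx) // on failure: print diagnostics, then always clean up

		_ = namespace // a real test would create a Jenkins CR here and wait for it to converge
	}
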
@@ -17,8 +17,8 @@ import (
func TestJenkinsMasterPodRestart(t *testing.T) {
	t.Parallel()
	namespace, ctx := setupTest(t)
	// Deletes test namespace
	defer ctx.Cleanup()
	defer showLogsAndCleanup(t, ctx)

	jenkins := createJenkinsCR(t, "e2e", namespace, nil, v1alpha2.GroovyScripts{}, v1alpha2.ConfigurationAsCode{})
	waitForJenkinsBaseConfigurationToComplete(t, jenkins)

@@ -23,8 +23,8 @@ const pvcName = "pvc"
func TestBackupAndRestore(t *testing.T) {
	t.Parallel()
	namespace, ctx := setupTest(t)
	// Deletes test namespace
	defer ctx.Cleanup()
	defer showLogsAndCleanup(t, ctx)

	jobID := "e2e-jenkins-operator"
	createPVC(t, namespace)

@@ -43,8 +43,8 @@ func TestSeedJobs(t *testing.T) {
	}
	seedJobsConfig := loadSeedJobsConfig(t)
	namespace, ctx := setupTest(t)
	// Deletes test namespace
	defer ctx.Cleanup()
	defer showLogsAndCleanup(t, ctx)

	jenkinsCRName := "e2e"
	var seedJobs []v1alpha2.SeedJob

@@ -59,7 +59,7 @@ func waitForJenkinsBaseConfigurationToComplete(t *testing.T, jenkins *v1alpha2.J
		return err == nil && jenkins.Status.BaseConfigurationCompletedTime != nil
	})
	if err != nil {
		failTestAndPrintLogs(t, jenkins.Namespace, err)
		t.Fatal(err)
	}
	t.Log("Jenkins pod is running")

@@ -85,7 +85,7 @@ func waitForRecreateJenkinsMasterPod(t *testing.T, jenkins *v1alpha2.Jenkins) {
		return podList.Items[0].DeletionTimestamp == nil, nil
	})
	if err != nil {
		failTestAndPrintLogs(t, jenkins.Namespace, err)
		t.Fatal(err)
	}
	t.Log("Jenkins pod has been recreated")
}

@@ -97,7 +97,7 @@ func waitForJenkinsUserConfigurationToComplete(t *testing.T, jenkins *v1alpha2.J
		return err == nil && jenkins.Status.UserConfigurationCompletedTime != nil
	})
	if err != nil {
		failTestAndPrintLogs(t, jenkins.Namespace, err)
		t.Fatal(err)
	}
	t.Log("Jenkins pod is running")
}