Code refactoring
parent a6a85bcbf0
commit d07ebcd507
@@ -36,12 +36,6 @@ spec:
  repositoryUrl: https://github.com/VirtusLab/jenkins-operator-e2e.git
```

Create Jenkins Custom Resource:

```bash
kubectl create -f deploy/crds/virtuslab_v1alpha1_jenkins_cr.yaml
```

Watch Jenkins instance being created:

```bash
@@ -115,13 +109,13 @@ spec:

Jenkins operator will automatically configure and trigger Seed Job Pipeline for all entries from `Jenkins.spec.seedJobs`.

[job-dsl]:https://github.com/jenkinsci/job-dsl-plugin
[ssh-credentials]:https://github.com/jenkinsci/ssh-credentials-plugin

## Install Plugins

## Configure Authorization

## Configure Backup & Restore

## Debugging
## Debugging

[job-dsl]:https://github.com/jenkinsci/job-dsl-plugin
[ssh-credentials]:https://github.com/jenkinsci/ssh-credentials-plugin
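For orientation only: in code terms, "configure and trigger Seed Job Pipeline for all entries" boils down to walking `Jenkins.Spec.SeedJobs` and ensuring one seed-job build per entry. The sketch below is an assumption-laden illustration, not the operator's actual implementation; the parameter names and the use of the entry ID as the build hash are made up, while `EnsureBuildJob` and `ConfigureSeedJobsName` do appear later in this commit.

```go
// Illustrative sketch (imagined in package seedjobs): schedule the seed-job
// pipeline once per entry of Jenkins.Spec.SeedJobs.
func ensureSeedJobPipelines(builds *jobs.Jobs, jenkins *virtuslabv1alpha1.Jenkins) (bool, error) {
    done := true
    for _, seedJob := range jenkins.Spec.SeedJobs {
        params := map[string]string{
            "ID":             seedJob.ID, // parameter names are assumptions
            "REPOSITORY_URL": seedJob.RepositoryURL,
        }
        // the hash argument is only a placeholder in this sketch
        finished, err := builds.EnsureBuildJob(ConfigureSeedJobsName, seedJob.ID, params, jenkins, true)
        if err != nil {
            return false, err
        }
        done = done && finished
    }
    return done, nil
}
```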
@@ -27,7 +27,13 @@ kubectl create -f deploy/role_binding.yaml
kubectl create -f deploy/operator.yaml
```

Now **jenkins-operator** should be up and running within `default` namespace.
Watch **jenkins-operator** instance being created:

```bash
kubectl get pods -w
```

Now **jenkins-operator** should be up and running in `default` namespace.
@@ -34,12 +34,32 @@ type JenkinsStatus struct {
    Builds []Build `json:"builds,omitempty"`
}

// BuildStatus defines type of Jenkins build job status
type BuildStatus string

const (
    // BuildSuccessStatus - the build had no errors
    BuildSuccessStatus BuildStatus = "success"
    // BuildUnstableStatus - the build had some errors but they were not fatal. For example, some tests failed
    BuildUnstableStatus BuildStatus = "unstable"
    // BuildNotBuildStatus - this status code is used in a multi-stage build (like maven2) where a problem in earlier stage prevented later stages from building
    BuildNotBuildStatus BuildStatus = "not_build"
    // BuildFailureStatus - the build had a fatal error
    BuildFailureStatus BuildStatus = "failure"
    // BuildAbortedStatus - the build was manually aborted
    BuildAbortedStatus BuildStatus = "aborted"
    // BuildRunningStatus - this is custom build status for running build, not present in jenkins build result
    BuildRunningStatus BuildStatus = "running"
    // BuildExpiredStatus - this is custom build status for expired build, not present in jenkins build result
    BuildExpiredStatus BuildStatus = "expired"
)

// Build defines Jenkins Build status with corresponding metadata
type Build struct {
    Name string `json:"name,omitempty"`
    JobName string `json:"jobName,omitempty"`
    Hash string `json:"hash,omitempty"`
    Number int64 `json:"number,omitempty"`
    Status string `json:"status,omitempty"` // from https://javadoc.jenkins-ci.org/hudson/model/Result.html
    Status BuildStatus `json:"status,omitempty"`
    Retires int `json:"retries,omitempty"`
    CreateTime *metav1.Time `json:"createTime,omitempty"`
    LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
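Since the status is now typed, callers can branch on the `BuildStatus` constants instead of comparing lowercase strings. A minimal sketch of how code outside this commit might inspect `Jenkins.Status.Builds` (the helper name is illustrative):

```go
// allBuildsSucceeded is an illustrative helper, not part of this commit:
// it walks the recorded builds and checks their typed status.
func allBuildsSucceeded(jenkins *virtuslabv1alpha1.Jenkins) bool {
    for _, build := range jenkins.Status.Builds {
        switch build.Status {
        case virtuslabv1alpha1.BuildSuccessStatus:
            // finished without errors - keep checking the remaining builds
        case virtuslabv1alpha1.BuildRunningStatus:
            return false // still in progress
        default:
            return false // unstable, not_build, failure, aborted or expired
        }
    }
    return true
}
```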
@@ -38,23 +38,12 @@ func New(k8sClient k8s.Client, jenkinsClient jenkinsclient.Jenkins, logger logr.
func (r *ReconcileUserConfiguration) Reconcile() (*reconcile.Result, error) {
    // reconcile seed jobs
    result, err := r.reconcileSeedJobs()
    if err != nil {
    if err != nil || result != nil {
        return result, err
    }
    if result != nil {
        return result, nil
    }

    // reconcile custom groovy scripts
    result, err = r.reconcileCustomGroovy()
    if err != nil {
        return result, err
    }
    if result != nil {
        return result, nil
    }

    return nil, nil
    return r.reconcileCustomGroovy()
}

func (r *ReconcileUserConfiguration) reconcileSeedJobs() (*reconcile.Result, error) {
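The refactored `Reconcile` leans on a single rule: if a step returns a non-nil result or an error, return that pair as-is; otherwise run the next step. A small sketch of the same idiom pulled out into a helper (the `step` type and `runSteps` are illustrative, not part of this commit):

```go
// step is one reconcile phase; a non-nil result means "stop and requeue with this result".
type step func() (*reconcile.Result, error)

// runSteps executes the steps in order, stopping at the first one that fails
// or asks for a requeue; illustrative only.
func runSteps(steps ...step) (*reconcile.Result, error) {
    for _, s := range steps {
        if result, err := s(); err != nil || result != nil {
            return result, err
        }
    }
    return nil, nil
}
```

With such a helper the method above would reduce to `return runSteps(r.reconcileSeedJobs, r.reconcileCustomGroovy)`.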
@@ -122,6 +122,7 @@ func (s *SeedJobs) privateKeyFromSecret(namespace string, seedJob virtuslabv1alp
    return "", nil
}

// FIXME(antoniaklja) use mask-password plugin for params.PRIVATE_KEY
// seedJobConfigXML this is the XML representation of seed job
var seedJobConfigXML = `
<flow-definition plugin="workflow-job@2.30">

@@ -3,12 +3,10 @@ package seedjobs
import (
    "context"
    "fmt"
    "os"
    "testing"

    virtuslabv1alpha1 "github.com/VirtusLab/jenkins-operator/pkg/apis/virtuslab/v1alpha1"
    "github.com/VirtusLab/jenkins-operator/pkg/controller/jenkins/client"
    "github.com/VirtusLab/jenkins-operator/pkg/controller/jenkins/jobs"

    "github.com/bndr/gojenkins"
    "github.com/golang/mock/gomock"

@@ -22,11 +20,6 @@ import (
    logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)

func TestMain(m *testing.M) {
    virtuslabv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
    os.Exit(m.Run())
}

func TestEnsureSeedJobs(t *testing.T) {
    // given
    logger := logf.ZapLogger(false)
@@ -36,10 +29,13 @@ func TestEnsureSeedJobs(t *testing.T) {

    jenkinsClient := client.NewMockJenkins(ctrl)
    fakeClient := fake.NewFakeClient()
    err := virtuslabv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
    assert.NoError(t, err)

    jenkins := jenkinsCustomResource()
    err := fakeClient.Create(ctx, jenkins)
    err = fakeClient.Create(ctx, jenkins)
    assert.NoError(t, err)
    buildNumber := int64(1)

    for reconcileAttempt := 1; reconcileAttempt <= 2; reconcileAttempt++ {
        logger.Info(fmt.Sprintf("Reconcile attempt #%d", reconcileAttempt))

@@ -58,7 +54,7 @@ func TestEnsureSeedJobs(t *testing.T) {
            GetJob(ConfigureSeedJobsName).
            Return(&gojenkins.Job{
                Raw: &gojenkins.JobResponse{
                    NextBuildNumber: int64(1),
                    NextBuildNumber: buildNumber,
                },
            }, nil)

@@ -80,7 +76,7 @@ func TestEnsureSeedJobs(t *testing.T) {
            GetBuild(ConfigureSeedJobsName, gomock.Any()).
            Return(&gojenkins.Build{
                Raw: &gojenkins.BuildResponse{
                    Result: jobs.SuccessStatus,
                    Result: string(virtuslabv1alpha1.BuildSuccessStatus),
                },
            }, nil)
        }
@@ -91,15 +87,27 @@ func TestEnsureSeedJobs(t *testing.T) {
        err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
        assert.NoError(t, err)

        assert.Equal(t, 1, len(jenkins.Status.Builds), "There is one running job")
        build := jenkins.Status.Builds[0]
        assert.Equal(t, buildNumber, build.Number)
        assert.Equal(t, ConfigureSeedJobsName, build.JobName)
        assert.NotNil(t, build.CreateTime)
        assert.NotEmpty(t, build.Hash)
        assert.NotNil(t, build.LastUpdateTime)
        assert.Equal(t, 0, build.Retires)

        // first run - should create job and schedule build
        if reconcileAttempt == 1 {
            assert.False(t, done)
            assert.Equal(t, string(virtuslabv1alpha1.BuildRunningStatus), string(build.Status))
        }

        // second run - should update and finish job
        if reconcileAttempt == 2 {
            assert.True(t, done)
            assert.Equal(t, string(virtuslabv1alpha1.BuildSuccessStatus), string(build.Status))
        }

    }
}
@@ -1,6 +1,7 @@
package theme

// SetThemeGroovyScript it's a groovy script which set custom jenkins theme
// TODO move to base configuration
var SetThemeGroovyScript = `
import jenkins.*
import jenkins.model.*
@@ -4,6 +4,7 @@ import (
    "context"
    "crypto/x509"
    "encoding/pem"
    "errors"
    "fmt"
    "strings"

@@ -11,18 +12,17 @@ import (
    "github.com/VirtusLab/jenkins-operator/pkg/log"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
)

// Validate validates Jenkins CR Spec section
func (r *ReconcileUserConfiguration) Validate(jenkins *virtuslabv1alpha1.Jenkins) bool {
    if !r.validateSeedJobs(jenkins) {
        return false
    }
    return true
func (r *ReconcileUserConfiguration) Validate(jenkins *virtuslabv1alpha1.Jenkins) (bool, error) {
    return r.validateSeedJobs(jenkins)
}

func (r *ReconcileUserConfiguration) validateSeedJobs(jenkins *virtuslabv1alpha1.Jenkins) bool {
func (r *ReconcileUserConfiguration) validateSeedJobs(jenkins *virtuslabv1alpha1.Jenkins) (bool, error) {
    valid := true
    if jenkins.Spec.SeedJobs != nil {
        for _, seedJob := range jenkins.Spec.SeedJobs {
            logger := r.logger.WithValues("seedJob", fmt.Sprintf("%+v", seedJob)).V(log.VWarn)
@@ -30,14 +30,14 @@ func (r *ReconcileUserConfiguration) validateSeedJobs(jenkins *virtuslabv1alpha1
            // validate seed job id is not empty
            if len(seedJob.ID) == 0 {
                logger.Info("seed job id can't be empty")
                return false
                valid = false
            }

            // validate repository url match private key
            if strings.Contains(seedJob.RepositoryURL, "git@") {
                if seedJob.PrivateKey.SecretKeyRef == nil {
                    logger.Info("private key can't be empty while using ssh repository url")
                    return false
                    valid = false
                }
            }

@@ -46,43 +46,44 @@ func (r *ReconcileUserConfiguration) validateSeedJobs(jenkins *virtuslabv1alpha1
                deployKeySecret := &v1.Secret{}
                namespaceName := types.NamespacedName{Namespace: jenkins.Namespace, Name: seedJob.PrivateKey.SecretKeyRef.Name}
                err := r.k8sClient.Get(context.TODO(), namespaceName, deployKeySecret)
                //TODO(bantoniak) handle error properly
                if err != nil {
                    logger.Info("couldn't read private key secret")
                    return false
                if err != nil && apierrors.IsNotFound(err) {
                    logger.Info("secret not found")
                    valid = false
                } else if err != nil {
                    return false, err
                }

                privateKey := string(deployKeySecret.Data[seedJob.PrivateKey.SecretKeyRef.Key])
                if privateKey == "" {
                    logger.Info("private key is empty")
                    return false
                    valid = false
                }

                if !validatePrivateKey(privateKey) {
                    logger.Info("private key is invalid")
                    return false
                if err := validatePrivateKey(privateKey); err != nil {
                    logger.Info(fmt.Sprintf("private key is invalid: %s", err))
                    valid = false
                }
            }
        }
    }
    return true
    return valid, nil
}

func validatePrivateKey(privateKey string) bool {
func validatePrivateKey(privateKey string) error {
    block, _ := pem.Decode([]byte(privateKey))
    if block == nil {
        return false
        return errors.New("failed to decode PEM block")
    }

    priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
    if err != nil {
        return false
        return err
    }

    err = priv.Validate()
    if err != nil {
        return false
        return err
    }

    return true
    return nil
}
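For context, the keys this validator accepts are PKCS#1 RSA private keys in PEM form (`pem.Decode` followed by `x509.ParsePKCS1PrivateKey`). A standard-library sketch of producing such a key, e.g. for a test fixture; the helper name is illustrative:

```go
// generateTestPrivateKey returns a PEM-encoded PKCS#1 RSA key that
// validatePrivateKey should accept; illustrative helper for tests.
func generateTestPrivateKey() (string, error) {
    key, err := rsa.GenerateKey(rand.Reader, 2048) // crypto/rsa, crypto/rand
    if err != nil {
        return "", err
    }
    block := &pem.Block{
        Type:  "RSA PRIVATE KEY",
        Bytes: x509.MarshalPKCS1PrivateKey(key),
    }
    return string(pem.EncodeToMemory(block)), nil
}
```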
@@ -190,9 +190,13 @@ func TestValidateUserConfiguration(t *testing.T) {
    for _, testingData := range data {
        t.Run(fmt.Sprintf("Testing '%s'", testingData.description), func(t *testing.T) {
            fakeClient := fake.NewFakeClient()
            fakeClient.Create(context.TODO(), testingData.secret)
            if testingData.secret != nil {
                err := fakeClient.Create(context.TODO(), testingData.secret)
                assert.NoError(t, err)
            }
            userReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false), nil)
            result := userReconcileLoop.Validate(testingData.jenkins)
            result, err := userReconcileLoop.Validate(testingData.jenkins)
            assert.NoError(t, err)
            assert.Equal(t, testingData.expectedResult, result)
        })
    }
@@ -64,6 +64,8 @@ func (g *Groovy) EnsureGroovyJob(groovyScript string, jenkins *virtuslabv1alpha1
    return done, nil
}

// FIXME(antoniaklja) use mask-password plugin for params.GROOVY_SCRIPT
// TODO add groovy script name
var groovyJobConfigXML = `
<flow-definition plugin="workflow-job@2.30">
  <actions/>

@@ -73,7 +75,7 @@ var groovyJobConfigXML = `
    <hudson.model.ParametersDefinitionProperty>
      <parameterDefinitions>
        <hudson.model.TextParameterDefinition>
          <name>GROOVY_SCRIPT</name>
          <name>` + groovyScriptParameterName + `</name>
          <description></description>
          <defaultValue></defaultValue>
          <trim>false</trim>
@@ -52,6 +52,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
        return err
    }

    // TODO Modify this to be the types you create that are owned by the primary resource
    // Watch for changes to secondary resource Pods and requeue the owner Jenkins
    err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
        IsController: true,

@@ -121,7 +122,11 @@ func (r *ReconcileJenkins) Reconcile(request reconcile.Request) (reconcile.Resul

    // Reconcile user configuration
    userConfiguration := user.New(r.client, jenkinsClient, logger, jenkins)
    if !userConfiguration.Validate(jenkins) {
    valid, err := userConfiguration.Validate(jenkins)
    if err != nil {
        return reconcile.Result{}, err
    }
    if !valid {
        logger.V(log.VWarn).Info("Validation of user configuration failed, please correct Jenkins CR")
        return reconcile.Result{}, nil // don't requeue
    }
@@ -15,26 +15,7 @@ import (
    k8s "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
    // SuccessStatus - the build had no errors
    SuccessStatus = "success"
    // UnstableStatus - the build had some errors but they were not fatal. For example, some tests failed
    UnstableStatus = "unstable"
    // NotBuildStatus - this status code is used in a multi-stage build (like maven2) where a problem in earlier stage prevented later stages from building
    NotBuildStatus = "not_build"
    // FailureStatus - the build had a fatal error
    FailureStatus = "failure"
    // AbortedStatus - the build was manually aborted
    AbortedStatus = "aborted"
    // RunningStatus - this is custom build status for running build, not present in jenkins build result
    RunningStatus = "running"
    // ExpiredStatus - this is custom build status for expired build, not present in jenkins build result
    ExpiredStatus = "expired"
)

var (
    // ErrorEmptyJenkinsCR - this is custom error returned when jenkins custom resource is empty
    ErrorEmptyJenkinsCR = errors.New("empty jenkins cr")
    // ErrorUnexpectedBuildStatus - this is custom error returned when jenkins build has unexpected status
    ErrorUnexpectedBuildStatus = errors.New("unexpected build status")
    // ErrorBuildFailed - this is custom error returned when jenkins build has failed
@@ -70,47 +51,47 @@ func New(jenkinsClient client.Jenkins, k8sClient k8s.Client, logger logr.Logger)
// entire state is saved in Jenkins.Status.Builds section
// function return 'true' when build finished successfully or false when reconciliation loop should requeue this function
// preserveStatus determines that build won't be removed from Jenkins.Status.Builds section
func (jobs *Jobs) EnsureBuildJob(name, hash string, parameters map[string]string, jenkins *virtuslabv1alpha1.Jenkins, preserveStatus bool) (done bool, err error) {
    jobs.logger.Info(fmt.Sprintf("Ensuring build, name:'%s' hash:'%s'", name, hash))
func (jobs *Jobs) EnsureBuildJob(jobName, hash string, parameters map[string]string, jenkins *virtuslabv1alpha1.Jenkins, preserveStatus bool) (done bool, err error) {
    jobs.logger.Info(fmt.Sprintf("Ensuring build, name:'%s' hash:'%s'", jobName, hash))

    build, err := jobs.getBuildFromStatus(name, hash, jenkins)
    build, err := jobs.getBuildFromStatus(jobName, hash, jenkins)
    if err != nil {
        return false, err
    }

    if build != nil {
        jobs.logger.Info(fmt.Sprintf("Build exists in status, name:'%s' hash:'%s' status: '%s'", name, hash, build.Status))
        switch strings.ToLower(build.Status) {
        case SuccessStatus:
        jobs.logger.Info(fmt.Sprintf("Build exists in status, %+v", build))
        switch build.Status {
        case virtuslabv1alpha1.BuildSuccessStatus:
            return jobs.ensureSuccessBuild(*build, jenkins, preserveStatus)
        case RunningStatus:
        case virtuslabv1alpha1.BuildRunningStatus:
            return jobs.ensureRunningBuild(*build, jenkins, preserveStatus)
        case UnstableStatus, NotBuildStatus, FailureStatus, AbortedStatus:
        case virtuslabv1alpha1.BuildUnstableStatus, virtuslabv1alpha1.BuildNotBuildStatus, virtuslabv1alpha1.BuildFailureStatus, virtuslabv1alpha1.BuildAbortedStatus:
            return jobs.ensureFailedBuild(*build, jenkins, parameters, preserveStatus)
        case ExpiredStatus:
        case virtuslabv1alpha1.BuildExpiredStatus:
            return jobs.ensureExpiredBuild(*build, jenkins, preserveStatus)
        default:
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Unexpected build status, name:'%s' hash:'%s' status:'%s'", name, hash, build.Status))
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Unexpected build status, %+v", build))
            return false, ErrorUnexpectedBuildStatus
        }
    }

    // build is run first time - build job and update status
    jobs.logger.Info(fmt.Sprintf("Build doesn't exist, running and updating status, name:'%s' hash:'%s'", name, hash))
    jobs.logger.Info(fmt.Sprintf("Build doesn't exist, running and updating status, %+v", build))
    created := metav1.Now()
    newBuild := virtuslabv1alpha1.Build{
        Name: name,
        JobName: jobName,
        Hash: hash,
        CreateTime: &created,
    }
    return jobs.buildJob(newBuild, parameters, jenkins)
}

func (jobs *Jobs) getBuildFromStatus(name string, hash string, jenkins *virtuslabv1alpha1.Jenkins) (*virtuslabv1alpha1.Build, error) {
func (jobs *Jobs) getBuildFromStatus(jobName string, hash string, jenkins *virtuslabv1alpha1.Jenkins) (*virtuslabv1alpha1.Build, error) {
    if jenkins != nil {
        builds := jenkins.Status.Builds
        for _, build := range builds {
            if build.Name == name && build.Hash == hash {
            if build.JobName == jobName && build.Hash == hash {
                return &build, nil
            }
        }
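The tests later in this commit show the intended calling pattern: hash the job configuration, then call `EnsureBuildJob` on every reconcile pass until it reports done. A condensed sketch of that usage; the wrapper function and its arguments are assumptions, only the `EnsureBuildJob` signature comes from this commit:

```go
// Illustrative wrapper: drive EnsureBuildJob from a reconcile step. The hash
// identifies the job configuration, so a changed config schedules a new build.
func ensureConfigured(builds *Jobs, jenkins *virtuslabv1alpha1.Jenkins, jobName, jobConfigXML string) (*reconcile.Result, error) {
    hash := sha256.Sum256([]byte(jobConfigXML))
    encodedHash := base64.URLEncoding.EncodeToString(hash[:])

    done, err := builds.EnsureBuildJob(jobName, encodedHash, nil, jenkins, true)
    if err != nil {
        return nil, err
    }
    if !done {
        // build not finished yet - ask the controller to requeue and re-check later
        return &reconcile.Result{Requeue: true}, nil
    }
    return nil, nil
}
```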
@@ -119,18 +100,13 @@ func (jobs *Jobs) getBuildFromStatus(name string, hash string, jenkins *virtusla
}

func (jobs *Jobs) ensureSuccessBuild(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins, preserveStatus bool) (bool, error) {
    if jenkins == nil {
        jobs.logger.V(log.VWarn).Info("Jenkins CR is empty")
        return false, ErrorEmptyJenkinsCR
    }

    jobs.logger.Info(fmt.Sprintf("Ensuring success build, name:'%s' hash:'%s'", build.Name, build.Hash))
    jobs.logger.Info(fmt.Sprintf("Ensuring success build, %+v", build))

    if !preserveStatus {
        err := jobs.removeBuildFromStatus(build, jenkins)
        jobs.logger.Info(fmt.Sprintf("Removing build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.Info(fmt.Sprintf("Removing build from status, %+v", build))
        if err != nil {
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
            return false, err
        }
    }
@@ -138,41 +114,36 @@ func (jobs *Jobs) ensureSuccessBuild(build virtuslabv1alpha1.Build, jenkins *vir
}

func (jobs *Jobs) ensureRunningBuild(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins, preserveStatus bool) (bool, error) {
    if jenkins == nil {
        jobs.logger.V(log.VWarn).Info("Jenkins CR is empty")
        return false, ErrorEmptyJenkinsCR
    }

    jobs.logger.Info(fmt.Sprintf("Ensuring running build, name:'%s' hash:'%s'", build.Name, build.Hash))
    jobs.logger.Info(fmt.Sprintf("Ensuring running build, %+v", build))
    // FIXME (antoniaklja) implement build expiration

    jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.Name, build.Number)
    jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
    if isNotFoundError(err) {
        jobs.logger.Info(fmt.Sprintf("Build still running , name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.Info(fmt.Sprintf("Build still running , %+v", build))
        return false, nil
    } else if err != nil {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't get jenkins build, name:'%s' number:'%d'", build.Name, build.Number))
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't get jenkins build, %+v", build))
        return false, err
    }

    if jenkinsBuild.GetResult() != "" {
        build.Status = strings.ToLower(jenkinsBuild.GetResult())
        build.Status = virtuslabv1alpha1.BuildStatus(strings.ToLower(jenkinsBuild.GetResult()))
    }

    jobs.logger.Info(fmt.Sprintf("Updating build status, name:'%s' hash:'%s' status:'%s'", build.Name, build.Hash, build.Status))
    err = jobs.updateBuildStatus(build, jenkins)
    if err != nil {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't update build status, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't update build status, %+v", build))
        return false, err
    }

    if build.Status == SuccessStatus {
        jobs.logger.Info(fmt.Sprintf("Build finished successfully, name:'%s' hash:'%s' status:'%s'", build.Name, build.Hash, build.Status))
    if build.Status == virtuslabv1alpha1.BuildSuccessStatus {
        jobs.logger.Info(fmt.Sprintf("Build finished successfully, %+v", build))
        return true, nil
    }

    if build.Status == FailureStatus || build.Status == UnstableStatus || build.Status == NotBuildStatus || build.Status == AbortedStatus {
        jobs.logger.Info(fmt.Sprintf("Build failed, name:'%s' hash:'%s' status:'%s'", build.Name, build.Hash, build.Status))
    if build.Status == virtuslabv1alpha1.BuildFailureStatus || build.Status == virtuslabv1alpha1.BuildUnstableStatus ||
        build.Status == virtuslabv1alpha1.BuildNotBuildStatus || build.Status == virtuslabv1alpha1.BuildAbortedStatus {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Build failed, %+v", build))
        return false, ErrorBuildFailed
    }

@@ -180,31 +151,26 @@ func (jobs *Jobs) ensureRunningBuild(build virtuslabv1alpha1.Build, jenkins *vir
}

func (jobs *Jobs) ensureFailedBuild(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins, parameters map[string]string, preserveStatus bool) (bool, error) {
    if jenkins == nil {
        jobs.logger.V(log.VWarn).Info("Jenkins CR is empty")
        return false, ErrorEmptyJenkinsCR
    }

    jobs.logger.Info(fmt.Sprintf("Ensuring failed build, name:'%s' hash:'%s' status: '%s'", build.Name, build.Hash, build.Status))
    jobs.logger.Info(fmt.Sprintf("Ensuring failed build, %+v", build))

    if build.Retires < BuildRetires {
        jobs.logger.Info(fmt.Sprintf("Retrying build, name:'%s' hash:'%s' retries: '%d'", build.Name, build.Hash, build.Retires))
        jobs.logger.Info(fmt.Sprintf("Retrying build, %+v", build))
        build.Retires = build.Retires + 1
        _, err := jobs.buildJob(build, parameters, jenkins)
        if err != nil {
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't retry build, name:'%s' hash:'%s'", build.Name, build.Hash))
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't retry build, %+v", build))
            return false, err
        }
        return false, nil
    }

    jobs.logger.Info(fmt.Sprintf("The retries limit was reached , name:'%s' hash:'%s' retries: '%d'", build.Name, build.Hash, build.Retires))
    jobs.logger.Info(fmt.Sprintf("The retries limit was reached , %+v", build))

    if !preserveStatus {
        jobs.logger.Info(fmt.Sprintf("Removing build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.Info(fmt.Sprintf("Removing build from status, %+v", build))
        err := jobs.removeBuildFromStatus(build, jenkins)
        if err != nil {
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
            return false, err
        }
    }
@@ -212,14 +178,9 @@ func (jobs *Jobs) ensureFailedBuild(build virtuslabv1alpha1.Build, jenkins *virt
}

func (jobs *Jobs) ensureExpiredBuild(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins, preserveStatus bool) (bool, error) {
    if jenkins == nil {
        jobs.logger.V(log.VWarn).Info("Jenkins CR is empty")
        return false, ErrorEmptyJenkinsCR
    }
    jobs.logger.Info(fmt.Sprintf("Ensuring expired build, %+v", build))

    jobs.logger.Info(fmt.Sprintf("Ensuring expired build, name:'%s' hash:'%s' status: '%s'", build.Name, build.Hash, build.Status))

    jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.Name, build.Number)
    jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
    if err != nil {
        return false, err
    }

@@ -229,12 +190,12 @@ func (jobs *Jobs) ensureExpiredBuild(build virtuslabv1alpha1.Build, jenkins *vir
        return false, err
    }

    jenkinsBuild, err = jobs.jenkinsClient.GetBuild(build.Name, build.Number)
    jenkinsBuild, err = jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
    if err != nil {
        return false, err
    }

    if jenkinsBuild.GetResult() != AbortedStatus {
    if virtuslabv1alpha1.BuildStatus(jenkinsBuild.GetResult()) != virtuslabv1alpha1.BuildAbortedStatus {
        return false, ErrorAbortBuildFailed
    }

@@ -246,10 +207,10 @@ func (jobs *Jobs) ensureExpiredBuild(build virtuslabv1alpha1.Build, jenkins *vir
    // TODO(antoniaklja) clean up k8s resources

    if !preserveStatus {
        jobs.logger.Info(fmt.Sprintf("Removing build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.Info(fmt.Sprintf("Removing build from status, %+v", build))
        err = jobs.removeBuildFromStatus(build, jenkins)
        if err != nil {
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, name:'%s' hash:'%s'", build.Name, build.Hash))
            jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
            return false, err
        }
    }

@@ -258,13 +219,9 @@ func (jobs *Jobs) ensureExpiredBuild(build virtuslabv1alpha1.Build, jenkins *vir
}

func (jobs *Jobs) removeBuildFromStatus(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins) error {
    if jenkins == nil {
        return ErrorEmptyJenkinsCR
    }

    builds := make([]virtuslabv1alpha1.Build, len(jenkins.Status.Builds), len(jenkins.Status.Builds))
    for _, existingBuild := range jenkins.Status.Builds {
        if existingBuild.Name != build.Name && existingBuild.Hash != build.Hash {
        if existingBuild.JobName != build.JobName && existingBuild.Hash != build.Hash {
            builds = append(builds, existingBuild)
        }
    }
@@ -278,49 +235,37 @@ func (jobs *Jobs) removeBuildFromStatus(build virtuslabv1alpha1.Build, jenkins *
}

func (jobs *Jobs) buildJob(build virtuslabv1alpha1.Build, parameters map[string]string, jenkins *virtuslabv1alpha1.Jenkins) (bool, error) {
    if jenkins == nil {
        return false, ErrorEmptyJenkinsCR
    }

    nextBuildNumber := int64(1)
    job, err := jobs.jenkinsClient.GetJob(build.Name)
    job, err := jobs.jenkinsClient.GetJob(build.JobName)
    if err != nil {
        jobs.logger.Info(fmt.Sprintf("Couldn't find jenkins job, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.Info(fmt.Sprintf("Couldn't find jenkins job, %+v", build))
        return false, err
    }
    nextBuildNumber := job.GetDetails().NextBuildNumber

    jobs.logger.Info(fmt.Sprintf("Running build, %+v", build))
    _, err = jobs.jenkinsClient.BuildJob(build.JobName, parameters)
    if err != nil {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't run build, %+v", build))
        return false, err
    }

    if job != nil {
        nextBuildNumber = job.GetDetails().NextBuildNumber
    }

    jobs.logger.Info(fmt.Sprintf("Running build, name:'%s' hash:'%s' number:'%d'", build.Name, build.Hash, nextBuildNumber))
    _, err = jobs.jenkinsClient.BuildJob(build.Name, parameters)
    if err != nil {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't run build, name:'%s' hash:'%s' number:'%d'", build.Name, build.Hash, nextBuildNumber))
        return false, err
    }

    build.Status = RunningStatus
    build.Status = virtuslabv1alpha1.BuildRunningStatus
    build.Number = nextBuildNumber

    jobs.logger.Info(fmt.Sprintf("Updating build status, name:'%s' hash:'%s' status:'%s' number:'%d'", build.Name, build.Hash, build.Status, build.Number))
    err = jobs.updateBuildStatus(build, jenkins)
    if err != nil {
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't update build status, name:'%s' hash:'%s'", build.Name, build.Hash))
        jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't update build status, %+v", build))
        return false, err
    }
    return false, nil
}

func (jobs *Jobs) updateBuildStatus(build virtuslabv1alpha1.Build, jenkins *virtuslabv1alpha1.Jenkins) error {
    if jenkins == nil {
        return ErrorEmptyJenkinsCR
    }

    jobs.logger.Info(fmt.Sprintf("Updating build status, %+v", build))
    // get index of existing build from status if exists
    buildIndex := -1
    for index, existingBuild := range jenkins.Status.Builds {
        if build.Name == existingBuild.Name && build.Hash == existingBuild.Hash {
        if build.JobName == existingBuild.JobName && build.Hash == existingBuild.Hash {
            buildIndex = index
        }
    }
@@ -5,7 +5,6 @@ import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "os"
    "testing"

    virtuslabv1alpha1 "github.com/VirtusLab/jenkins-operator/pkg/apis/virtuslab/v1alpha1"

@@ -23,11 +22,6 @@ import (
    logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)

func TestMain(m *testing.M) {
    virtuslabv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
    os.Exit(m.Run())
}

func TestSuccessEnsureJob(t *testing.T) {
    // given
    ctx := context.TODO()
@@ -35,15 +29,17 @@ func TestSuccessEnsureJob(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    buildName := "Test Job"
    jobName := "Test Job"
    hash := sha256.New()
    hash.Write([]byte(buildName))
    hash.Write([]byte(jobName))
    encodedHash := base64.URLEncoding.EncodeToString(hash.Sum(nil))

    // when
    jenkins := jenkinsCustomResource()
    fakeClient := fake.NewFakeClient()
    err := fakeClient.Create(ctx, jenkins)
    err := virtuslabv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme)
    assert.NoError(t, err)
    err = fakeClient.Create(ctx, jenkins)
    assert.NoError(t, err)

    for reconcileAttempt := 1; reconcileAttempt <= 2; reconcileAttempt++ {
@@ -54,7 +50,7 @@ func TestSuccessEnsureJob(t *testing.T) {

        jenkinsClient.
            EXPECT().
            GetJob(buildName).
            GetJob(jobName).
            Return(&gojenkins.Job{
                Raw: &gojenkins.JobResponse{
                    NextBuildNumber: buildNumber,

@@ -63,19 +59,19 @@ func TestSuccessEnsureJob(t *testing.T) {

        jenkinsClient.
            EXPECT().
            BuildJob(buildName, gomock.Any()).
            BuildJob(jobName, gomock.Any()).
            Return(int64(0), nil).AnyTimes()

        jenkinsClient.
            EXPECT().
            GetBuild(buildName, buildNumber).
            GetBuild(jobName, buildNumber).
            Return(&gojenkins.Build{
                Raw: &gojenkins.BuildResponse{
                    Result: SuccessStatus,
                    Result: string(virtuslabv1alpha1.BuildSuccessStatus),
                },
            }, nil).AnyTimes()

        done, err := jobs.EnsureBuildJob(buildName, encodedHash, nil, jenkins, true)
        done, err := jobs.EnsureBuildJob(jobName, encodedHash, nil, jenkins, true)
        assert.NoError(t, err)

        err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
@@ -85,7 +81,7 @@ func TestSuccessEnsureJob(t *testing.T) {
        assert.Equal(t, len(jenkins.Status.Builds), 1)

        build := jenkins.Status.Builds[0]
        assert.Equal(t, build.Name, buildName)
        assert.Equal(t, build.JobName, jobName)
        assert.Equal(t, build.Hash, encodedHash)
        assert.Equal(t, build.Number, buildNumber)
        assert.Equal(t, build.Retires, 0)

@@ -95,13 +91,13 @@ func TestSuccessEnsureJob(t *testing.T) {
        // first run - build should be scheduled and status updated
        if reconcileAttempt == 1 {
            assert.False(t, done)
            assert.Equal(t, build.Status, RunningStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildRunningStatus)
        }

        // second run -job should be success and status updated
        if reconcileAttempt == 2 {
            assert.True(t, done)
            assert.Equal(t, build.Status, SuccessStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildSuccessStatus)
        }
    }
}
@@ -113,9 +109,9 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    buildName := "Test Job"
    jobName := "Test Job"
    hash := sha256.New()
    hash.Write([]byte(buildName))
    hash.Write([]byte(jobName))
    encodedHash := base64.URLEncoding.EncodeToString(hash.Sum(nil))

    // when

@@ -133,7 +129,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
        if reconcileAttempt == 1 {
            jenkinsClient.
                EXPECT().
                GetJob(buildName).
                GetJob(jobName).
                Return(&gojenkins.Job{
                    Raw: &gojenkins.JobResponse{
                        NextBuildNumber: int64(1),

@@ -142,7 +138,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {

            jenkinsClient.
                EXPECT().
                BuildJob(buildName, gomock.Any()).
                BuildJob(jobName, gomock.Any()).
                Return(int64(0), nil)
        }

@@ -150,10 +146,10 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
        if reconcileAttempt == 2 {
            jenkinsClient.
                EXPECT().
                GetBuild(buildName, int64(1)).
                GetBuild(jobName, int64(1)).
                Return(&gojenkins.Build{
                    Raw: &gojenkins.BuildResponse{
                        Result: FailureStatus,
                        Result: string(virtuslabv1alpha1.BuildFailureStatus),
                    },
                }, nil)
        }
@@ -162,7 +158,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
        if reconcileAttempt == 3 {
            jenkinsClient.
                EXPECT().
                GetJob(buildName).
                GetJob(jobName).
                Return(&gojenkins.Job{
                    Raw: &gojenkins.JobResponse{
                        NextBuildNumber: int64(2),

@@ -171,7 +167,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {

            jenkinsClient.
                EXPECT().
                BuildJob(buildName, gomock.Any()).
                BuildJob(jobName, gomock.Any()).
                Return(int64(0), nil)
        }

@@ -179,15 +175,15 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
        if reconcileAttempt == 4 {
            jenkinsClient.
                EXPECT().
                GetBuild(buildName, int64(2)).
                GetBuild(jobName, int64(2)).
                Return(&gojenkins.Build{
                    Raw: &gojenkins.BuildResponse{
                        Result: SuccessStatus,
                        Result: string(virtuslabv1alpha1.BuildSuccessStatus),
                    },
                }, nil)
        }

        done, errEnsureBuildJob := jobs.EnsureBuildJob(buildName, encodedHash, nil, jenkins, true)
        done, errEnsureBuildJob := jobs.EnsureBuildJob(jobName, encodedHash, nil, jenkins, true)
        assert.NoError(t, err)

        err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
@@ -197,7 +193,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
        assert.Equal(t, len(jenkins.Status.Builds), 1)

        build := jenkins.Status.Builds[0]
        assert.Equal(t, build.Name, buildName)
        assert.Equal(t, build.JobName, jobName)
        assert.Equal(t, build.Hash, encodedHash)

        assert.NotNil(t, build.CreateTime)

@@ -208,7 +204,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
            assert.NoError(t, errEnsureBuildJob)
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(1))
            assert.Equal(t, build.Status, RunningStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildRunningStatus)
        }

        // second run - build should be failure and status updated

@@ -216,7 +212,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
            assert.Error(t, errEnsureBuildJob)
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(1))
            assert.Equal(t, build.Status, FailureStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildFailureStatus)
        }

        // third run - build should be rescheduled and status updated

@@ -224,7 +220,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
            assert.NoError(t, errEnsureBuildJob)
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(2))
            assert.Equal(t, build.Status, RunningStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildRunningStatus)
        }

        // fourth run - build should be success and status updated

@@ -232,7 +228,7 @@ func TestEnsureJobWithFailedBuild(t *testing.T) {
            assert.NoError(t, errEnsureBuildJob)
            assert.True(t, done)
            assert.Equal(t, build.Number, int64(2))
            assert.Equal(t, build.Status, SuccessStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildSuccessStatus)
        }
    }
}
@@ -285,7 +281,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
                GetBuild(buildName, int64(1)).
                Return(&gojenkins.Build{
                    Raw: &gojenkins.BuildResponse{
                        Result: FailureStatus,
                        Result: string(virtuslabv1alpha1.BuildFailureStatus),
                    },
                }, nil)
        }

@@ -314,7 +310,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
                GetBuild(buildName, int64(2)).
                Return(&gojenkins.Build{
                    Raw: &gojenkins.BuildResponse{
                        Result: FailureStatus,
                        Result: string(virtuslabv1alpha1.BuildFailureStatus),
                    },
                }, nil)
        }

@@ -329,7 +325,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
        assert.Equal(t, len(jenkins.Status.Builds), 1)

        build := jenkins.Status.Builds[0]
        assert.Equal(t, build.Name, buildName)
        assert.Equal(t, build.JobName, buildName)
        assert.Equal(t, build.Hash, encodedHash)

        assert.NotNil(t, build.CreateTime)
@@ -341,7 +337,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(1))
            assert.Equal(t, build.Retires, 0)
            assert.Equal(t, build.Status, RunningStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildRunningStatus)
        }

        // second run - build should be failure and status updated

@@ -350,7 +346,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(1))
            assert.Equal(t, build.Retires, 0)
            assert.Equal(t, build.Status, FailureStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildFailureStatus)
        }

        // third run - build should be rescheduled and status updated

@@ -360,7 +356,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
            //assert.Equal(t, build.Retires, 1)
            assert.Equal(t, build.Number, int64(2))
            assert.Equal(t, build.Retires, 1)
            assert.Equal(t, build.Status, RunningStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildRunningStatus)
        }

        // fourth run - build should be failure and status updated

@@ -369,7 +365,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(2))
            assert.Equal(t, build.Retires, 1)
            assert.Equal(t, build.Status, FailureStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildFailureStatus)
        }

        // fifth run - build should be unrecoverable failed and status updated
@@ -378,7 +374,7 @@ func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
            assert.False(t, done)
            assert.Equal(t, build.Number, int64(2))
            assert.Equal(t, build.Retires, 1)
            assert.Equal(t, build.Status, FailureStatus)
            assert.Equal(t, build.Status, virtuslabv1alpha1.BuildFailureStatus)
        }
    }
}
@@ -68,5 +68,5 @@ func verifyJenkinsSeedJobs(t *testing.T, client *gojenkins.Jenkins, jenkins *vir
    assert.NotEmpty(t, jenkins.Status.Builds)
    assert.Equal(t, len(jenkins.Status.Builds), 1)
    build := jenkins.Status.Builds[0]
    assert.Equal(t, build.Name, seedjobs.ConfigureSeedJobsName)
    assert.Equal(t, build.JobName, seedjobs.ConfigureSeedJobsName)
}