Remove unused job package and fix error handling in seedjobs package

This commit is contained in:
parent 30b83638e2
commit eb0f8a8331
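
Two changes ride together here: the `jobs` package (with its sentinel build errors) is deleted outright, and the seedjobs package starts wrapping returned errors with `WithStack` from `github.com/pkg/errors` (aliased `stackerr` in seedjobs), which records a stack trace at the failure site without altering the error's message or identity. A minimal sketch of the pattern, with a hypothetical `readConfig` helper:

package main

import (
	"fmt"
	"os"

	stackerr "github.com/pkg/errors"
)

// readConfig is a hypothetical helper illustrating the commit's pattern:
// wrap at the point of failure so the stack trace points at the failing call.
func readConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, stackerr.WithStack(err)
	}
	return data, nil
}

func main() {
	_, err := readConfig("/nonexistent")
	fmt.Printf("%+v\n", err)                        // message plus recorded stack trace
	fmt.Println(os.IsNotExist(stackerr.Cause(err))) // true: the original error is preserved
}

Wrapping at the point of return is what makes `%+v` output actionable: the trace points at the call that failed rather than at whatever eventually logged it.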

@@ -145,9 +145,8 @@ func isNotFoundError(err error) bool {
 func (jenkins *jenkins) GetNodeSecret(name string) (string, error) {
 	var content string
 	_, err := jenkins.Requester.GetXML(fmt.Sprintf("/computer/%s/slave-agent.jnlp", name), &content, nil)
-
 	if err != nil {
-		return "", err
+		return "", errors.WithStack(err)
 	}

 	match := regex.FindStringSubmatch(content)
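
One reason `errors.WithStack` is safe to introduce here: it delegates `Error()` to the underlying error, so string-based checks such as the `isNotFoundError` helper named in this hunk's header keep working on wrapped errors (in the deleted jobs package below, that check compares `Error()` against "404"). A tiny self-contained illustration, not from the repo:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	wrapped := errors.WithStack(fmt.Errorf("404"))
	// WithStack adds a stack trace but forwards Error() unchanged,
	// so checks like isNotFoundError still match.
	fmt.Println(wrapped.Error() == "404") // true
}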

@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"text/template"

-	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
 	"github.com/jenkinsci/kubernetes-operator/internal/render"
+	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/constants"

 	corev1 "k8s.io/api/core/v1"
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"text/template"

+	"github.com/jenkinsci/kubernetes-operator/internal/render"
 	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/constants"
-	"github.com/jenkinsci/kubernetes-operator/internal/render"

 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -2,7 +2,6 @@ package user

 import (
 	"strings"
-	"time"

 	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
 	jenkinsclient "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
@@ -11,10 +10,8 @@ import (
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/user/casc"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/user/seedjobs"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/groovy"
-	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/jobs"

 	"github.com/go-logr/logr"
-	"github.com/pkg/errors"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	k8s "sigs.k8s.io/controller-runtime/pkg/client"
@@ -82,20 +79,10 @@ func (r *ReconcileUserConfiguration) ensureSeedJobs() (reconcile.Result, error)
 	seedJobs := seedjobs.New(r.jenkinsClient, r.k8sClient, r.logger)
 	done, err := seedJobs.EnsureSeedJobs(r.jenkins)
 	if err != nil {
-		// build failed and can be recovered - retry build and requeue reconciliation loop with timeout
-		if err == jobs.ErrorBuildFailed {
-			return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 10}, nil
-		}
-		// build failed and cannot be recovered
-		if err == jobs.ErrorUnrecoverableBuildFailed {
-			return reconcile.Result{}, nil
-		}
-		// unexpected error - requeue reconciliation loop
-		return reconcile.Result{}, errors.WithStack(err)
+		return reconcile.Result{}, err
 	}
-	// build not finished yet - requeue reconciliation loop with timeout
 	if !done {
-		return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
+		return reconcile.Result{Requeue: true}, nil
 	}
 	return reconcile.Result{}, nil
 }
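
With the `jobs` package gone, `ensureSeedJobs` stops special-casing recoverable versus unrecoverable build failures: every error is propagated, and the not-done case requeues without a fixed delay. The practical effect (my reading of controller-runtime's contract, not stated in the commit) is that retries now go through the workqueue's rate-limited exponential backoff instead of the hard-coded 5- and 10-second timeouts. A compilable sketch of the requeue modes involved:

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// requeueModes contrasts the requeue styles touched by this change.
func requeueModes(err error, done bool) (reconcile.Result, error) {
	switch {
	case err != nil:
		// Returning an error: controller-runtime requeues with exponential backoff.
		return reconcile.Result{}, err
	case !done:
		// Requeue without a delay: also paced by the workqueue's rate limiter.
		return reconcile.Result{Requeue: true}, nil
	default:
		// Done: stop. (The removed style instead forced a fixed delay, e.g.
		// reconcile.Result{RequeueAfter: time.Second * 5}.)
		return reconcile.Result{}, nil
	}
}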

@@ -166,7 +166,7 @@ func (s *SeedJobs) EnsureSeedJobs(jenkins *v1alpha2.Jenkins) (done bool, err err
 		})

 		if err != nil && !apierrors.IsNotFound(err) {
-			return false, err
+			return false, stackerr.WithStack(err)
 		}
 	}

@@ -193,7 +193,7 @@ func (s *SeedJobs) EnsureSeedJobs(jenkins *v1alpha2.Jenkins) (done bool, err err

 // createJob is responsible for creating jenkins job which configures jenkins seed jobs and deploy keys
 func (s *SeedJobs) createJobs(jenkins *v1alpha2.Jenkins) (requeue bool, err error) {
-	groovyClient := groovy.New(s.jenkinsClient, s.k8sClient, s.logger, jenkins, "user-groovy", jenkins.Spec.GroovyScripts.Customization)
+	groovyClient := groovy.New(s.jenkinsClient, s.k8sClient, s.logger, jenkins, "seed-jobs", jenkins.Spec.GroovyScripts.Customization)
 	for _, seedJob := range jenkins.Spec.SeedJobs {
 		credentialValue, err := s.credentialValue(jenkins.Namespace, seedJob)
 		if err != nil {
@@ -209,7 +209,6 @@ func (s *SeedJobs) createJobs(jenkins *v1alpha2.Jenkins) (requeue bool, err erro
 		hash.Write([]byte(groovyScript))
 		hash.Write([]byte(credentialValue))
 		requeue, err := groovyClient.EnsureSingle(seedJob.ID, fmt.Sprintf("%s.groovy", seedJob.ID), base64.URLEncoding.EncodeToString(hash.Sum(nil)), groovyScript)
-
 		if err != nil {
 			return true, err
 		}
@@ -240,9 +239,8 @@ func (s *SeedJobs) ensureLabelsForSecrets(jenkins v1alpha2.Jenkins) error {

 		if !resources.VerifyIfLabelsAreSet(secret, requiredLabels) {
 			secret.ObjectMeta.Labels = requiredLabels
-			err = stackerr.WithStack(s.k8sClient.Update(context.TODO(), secret))
-			if err != nil {
-				return err
+			if err = s.k8sClient.Update(context.TODO(), secret); err != nil {
+				return stackerr.WithStack(err)
 			}
 		}
 	}
@@ -257,7 +255,7 @@ func (s *SeedJobs) credentialValue(namespace string, seedJob v1alpha2.SeedJob) (
 	namespaceName := types.NamespacedName{Namespace: namespace, Name: seedJob.CredentialID}
 	err := s.k8sClient.Get(context.TODO(), namespaceName, secret)
 	if err != nil {
-		return "", err
+		return "", stackerr.WithStack(err)
 	}

 	if seedJob.JenkinsCredentialType == v1alpha2.BasicSSHCredentialType {
@@ -321,10 +319,10 @@ func (s SeedJobs) createAgent(jenkinsClient jenkinsclient.Jenkins, k8sClient cli
 	if err != nil && err.Error() == "No node found" {
 		_, err = jenkinsClient.CreateNode(agentName, 1, "The jenkins-operator generated agent", "/home/jenkins", agentName)
 		if err != nil {
-			return err
+			return stackerr.WithStack(err)
 		}
 	} else if err != nil {
-		return err
+		return stackerr.WithStack(err)
 	}

 	secret, err := jenkinsClient.GetNodeSecret(agentName)
@@ -338,10 +336,10 @@ func (s SeedJobs) createAgent(jenkinsClient jenkinsclient.Jenkins, k8sClient cli
 	if apierrors.IsAlreadyExists(err) {
 		err := k8sClient.Update(context.TODO(), deployment)
 		if err != nil {
-			return err
+			return stackerr.WithStack(err)
 		}
 	} else if err != nil {
-		return err
+		return stackerr.WithStack(err)
 	}

 	return nil
@@ -19,15 +19,15 @@ import (
 )

 // ValidateSeedJobs verify seed jobs configuration
-func (r *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) (bool, error) {
+func (s *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) (bool, error) {
 	valid := true

-	if !r.validateIfIDIsUnique(jenkins.Spec.SeedJobs) {
+	if !s.validateIfIDIsUnique(jenkins.Spec.SeedJobs) {
 		valid = false
 	}

 	for _, seedJob := range jenkins.Spec.SeedJobs {
-		logger := r.logger.WithValues("seedJob", fmt.Sprintf("%+v", seedJob)).V(log.VWarn)
+		logger := s.logger.WithValues("seedJob", seedJob.ID).V(log.VWarn)

 		if len(seedJob.ID) == 0 {
 			logger.Info("id can't be empty")
@@ -69,7 +69,7 @@ func (r *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) (bool, error) {
 		if seedJob.JenkinsCredentialType == v1alpha2.BasicSSHCredentialType || seedJob.JenkinsCredentialType == v1alpha2.UsernamePasswordCredentialType {
 			secret := &v1.Secret{}
 			namespaceName := types.NamespacedName{Namespace: jenkins.Namespace, Name: seedJob.CredentialID}
-			err := r.k8sClient.Get(context.TODO(), namespaceName, secret)
+			err := s.k8sClient.Get(context.TODO(), namespaceName, secret)
 			if err != nil && apierrors.IsNotFound(err) {
 				logger.Info(fmt.Sprintf("required secret '%s' with Jenkins credential not found", seedJob.CredentialID))
 				return false, nil
@@ -90,19 +90,19 @@ func (r *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) (bool, error) {
 		}

 		if len(seedJob.BuildPeriodically) > 0 {
-			if !r.validateSchedule(seedJob, seedJob.BuildPeriodically, "buildPeriodically") {
+			if !s.validateSchedule(seedJob, seedJob.BuildPeriodically, "buildPeriodically") {
 				valid = false
 			}
 		}

 		if len(seedJob.PollSCM) > 0 {
-			if !r.validateSchedule(seedJob, seedJob.PollSCM, "pollSCM") {
+			if !s.validateSchedule(seedJob, seedJob.PollSCM, "pollSCM") {
 				valid = false
 			}
 		}

 		if seedJob.GitHubPushTrigger {
-			if !r.validateGitHubPushTrigger(jenkins) {
+			if !s.validateGitHubPushTrigger(jenkins) {
 				valid = false
 			}
 		}
@@ -111,16 +111,16 @@ func (r *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) (bool, error) {
 	return valid, nil
 }

-func (r *SeedJobs) validateSchedule(job v1alpha2.SeedJob, str string, key string) bool {
+func (s *SeedJobs) validateSchedule(job v1alpha2.SeedJob, str string, key string) bool {
 	_, err := cron.Parse(str)
 	if err != nil {
-		r.logger.V(log.VWarn).Info(fmt.Sprintf("`%s` schedule '%s' is invalid cron spec in `%s`", key, str, job.ID))
+		s.logger.V(log.VWarn).Info(fmt.Sprintf("`%s` schedule '%s' is invalid cron spec in `%s`", key, str, job.ID))
 		return false
 	}
 	return true
 }

-func (r *SeedJobs) validateGitHubPushTrigger(jenkins v1alpha2.Jenkins) bool {
+func (s *SeedJobs) validateGitHubPushTrigger(jenkins v1alpha2.Jenkins) bool {
 	exists := false
 	for _, plugin := range jenkins.Spec.Master.BasePlugins {
 		if plugin.Name == "github" {
@@ -136,17 +136,17 @@ func (r *SeedJobs) validateGitHubPushTrigger(jenkins v1alpha2.Jenkins) bool {
 	}

 	if !exists && !userExists {
-		r.logger.V(log.VWarn).Info("githubPushTrigger is set. This function requires `github` plugin installed in .Spec.Master.Plugins because seed jobs Push Trigger function needs it")
+		s.logger.V(log.VWarn).Info("githubPushTrigger is set. This function requires `github` plugin installed in .Spec.Master.Plugins because seed jobs Push Trigger function needs it")
 		return false
 	}
 	return true
 }

-func (r *SeedJobs) validateIfIDIsUnique(seedJobs []v1alpha2.SeedJob) bool {
+func (s *SeedJobs) validateIfIDIsUnique(seedJobs []v1alpha2.SeedJob) bool {
 	ids := map[string]bool{}
 	for _, seedJob := range seedJobs {
 		if _, found := ids[seedJob.ID]; found {
-			r.logger.V(log.VWarn).Info(fmt.Sprintf("'%s' seed job ID is not unique", seedJob.ID))
+			s.logger.V(log.VWarn).Info(fmt.Sprintf("'%s' seed job ID is not unique", seedJob.ID))
 			return false
 		}
 		ids[seedJob.ID] = true
@@ -11,7 +11,6 @@ import (
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/user"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/constants"
-	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/jobs"
 	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/plugins"
 	"github.com/jenkinsci/kubernetes-operator/pkg/event"
 	"github.com/jenkinsci/kubernetes-operator/pkg/log"
@@ -169,9 +168,6 @@ func (r *ReconcileJenkins) Reconcile(request reconcile.Request) (reconcile.Resul
 		}
 	}

-	if err == jobs.ErrorUnrecoverableBuildFailed {
-		return reconcile.Result{Requeue: false}, nil
-	}
 	if _, ok := err.(*jenkinsclient.GroovyScriptExecutionFailed); ok {
 		return reconcile.Result{Requeue: false}, nil
 	}
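
Note the check that survives: a type assertion on `*jenkinsclient.GroovyScriptExecutionFailed` rather than an `==` comparison against a sentinel like the deleted `jobs.ErrorUnrecoverableBuildFailed`. The sketch below (with a hypothetical `scriptFailed` type standing in for the real one) shows why that matters once errors get wrapped with stack traces:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// scriptFailed is a hypothetical stand-in for jenkinsclient.GroovyScriptExecutionFailed.
type scriptFailed struct{ script string }

func (e *scriptFailed) Error() string { return "groovy script failed: " + e.script }

var errBuildFailed = fmt.Errorf("build failed") // sentinel style, deleted by this commit

func main() {
	// Sentinel comparison breaks once the error is wrapped with a stack trace:
	wrapped := errors.WithStack(errBuildFailed)
	fmt.Println(wrapped == errBuildFailed)               // false
	fmt.Println(errors.Cause(wrapped) == errBuildFailed) // true only after unwrapping

	// A typed error carries context and is matched by assertion, as in Reconcile:
	var err error = &scriptFailed{script: "seed-jobs.groovy"}
	if _, ok := err.(*scriptFailed); ok {
		fmt.Println("groovy execution failure:", err)
	}
}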

@@ -1,2 +0,0 @@
-// Package jobs implements common jenkins jobs operations
-package jobs
@@ -1,293 +0,0 @@
-package jobs
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
-	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
-	"github.com/jenkinsci/kubernetes-operator/pkg/log"
-
-	"github.com/go-logr/logr"
-	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	k8s "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-var (
-	// ErrorUnexpectedBuildStatus - this is custom error returned when jenkins build has unexpected status
-	ErrorUnexpectedBuildStatus = fmt.Errorf("unexpected build status")
-	// ErrorBuildFailed - this is custom error returned when jenkins build has failed
-	ErrorBuildFailed = fmt.Errorf("build failed")
-	// ErrorAbortBuildFailed - this is custom error returned when jenkins build couldn't be aborted
-	ErrorAbortBuildFailed = fmt.Errorf("build abort failed")
-	// ErrorUnrecoverableBuildFailed - this is custom error returned when jenkins build has failed and cannot be recovered
-	ErrorUnrecoverableBuildFailed = fmt.Errorf("build failed and cannot be recovered")
-	// ErrorNotFound - this is error returned when jenkins build couldn't be found
-	ErrorNotFound = fmt.Errorf("404")
-	// BuildRetires - determines max amount of retires for failed build
-	BuildRetires = 3
-)
-
-// Jobs defines Jobs API tailored for operator sdk
-type Jobs struct {
-	jenkinsClient client.Jenkins
-	logger        logr.Logger
-	k8sClient     k8s.Client
-}
-
-// New creates jobs client
-func New(jenkinsClient client.Jenkins, k8sClient k8s.Client, logger logr.Logger) *Jobs {
-	return &Jobs{
-		jenkinsClient: jenkinsClient,
-		k8sClient:     k8sClient,
-		logger:        logger,
-	}
-}
-
-// EnsureBuildJob function takes care of jenkins build lifecycle according to the lifecycle of reconciliation loop
-// implementation guarantees that jenkins build can be properly handled even after operator pod restart
-// entire state is saved in Jenkins.Status.Builds section
-// function return 'true' when build finished successfully or false when reconciliation loop should requeue this function
-// preserveStatus determines that build won't be removed from Jenkins.Status.Builds section
-func (jobs *Jobs) EnsureBuildJob(jobName, hash string, parameters map[string]string, jenkins *v1alpha2.Jenkins, preserveStatus bool) (done bool, err error) {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Ensuring build, name:'%s' hash:'%s'", jobName, hash))
-
-	build := jobs.getBuildFromStatus(jobName, hash, jenkins)
-	if build != nil {
-		jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Build exists in status, %+v", build))
-		switch build.Status {
-		case v1alpha2.BuildSuccessStatus:
-			return jobs.ensureSuccessBuild(*build, jenkins, preserveStatus)
-		case v1alpha2.BuildRunningStatus:
-			return jobs.ensureRunningBuild(*build, jenkins, preserveStatus)
-		case v1alpha2.BuildUnstableStatus, v1alpha2.BuildNotBuildStatus, v1alpha2.BuildFailureStatus, v1alpha2.BuildAbortedStatus:
-			return jobs.ensureFailedBuild(*build, jenkins, parameters, preserveStatus)
-		case v1alpha2.BuildExpiredStatus:
-			return jobs.ensureExpiredBuild(*build, jenkins, preserveStatus)
-		default:
-			jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Unexpected build status, %+v", build))
-			return false, ErrorUnexpectedBuildStatus
-		}
-	}
-
-	// build is run first time - build job and update status
-	created := metav1.Now()
-	newBuild := v1alpha2.Build{
-		JobName:    jobName,
-		Hash:       hash,
-		CreateTime: &created,
-	}
-	return jobs.buildJob(newBuild, parameters, jenkins)
-}
-
-func (jobs *Jobs) getBuildFromStatus(jobName string, hash string, jenkins *v1alpha2.Jenkins) *v1alpha2.Build {
-	if jenkins != nil {
-		builds := jenkins.Status.Builds
-		for _, build := range builds {
-			if build.JobName == jobName && build.Hash == hash {
-				return &build
-			}
-		}
-	}
-	return nil
-}
-
-func (jobs *Jobs) ensureSuccessBuild(build v1alpha2.Build, jenkins *v1alpha2.Jenkins, preserveStatus bool) (bool, error) {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Ensuring success build, %+v", build))
-
-	if !preserveStatus {
-		err := jobs.removeBuildFromStatus(build, jenkins)
-		if err != nil {
-			jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
-			return false, err
-		}
-	}
-	return true, nil
-}
-
-func (jobs *Jobs) ensureRunningBuild(build v1alpha2.Build, jenkins *v1alpha2.Jenkins, preserveStatus bool) (bool, error) {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Ensuring running build, %+v", build))
-	// FIXME (antoniaklja) implement build expiration
-
-	jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
-	if isNotFoundError(err) {
-		jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Build still running , %+v", build))
-		return false, nil
-	} else if err != nil {
-		jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't get jenkins build, %+v", build))
-		return false, errors.WithStack(err)
-	}
-
-	if jenkinsBuild.GetResult() != "" {
-		build.Status = v1alpha2.BuildStatus(strings.ToLower(jenkinsBuild.GetResult()))
-	}
-
-	err = jobs.updateBuildStatus(build, jenkins)
-	if err != nil {
-		jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Couldn't update build status, %+v", build))
-		return false, err
-	}
-
-	if build.Status == v1alpha2.BuildSuccessStatus {
-		jobs.logger.Info(fmt.Sprintf("Build finished successfully, %+v", build))
-		return true, nil
-	}
-
-	if build.Status == v1alpha2.BuildFailureStatus || build.Status == v1alpha2.BuildUnstableStatus ||
-		build.Status == v1alpha2.BuildNotBuildStatus || build.Status == v1alpha2.BuildAbortedStatus {
-		jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Build failed, %+v", build))
-		return false, ErrorBuildFailed
-	}
-
-	return false, nil
-}
-
-func (jobs *Jobs) ensureFailedBuild(build v1alpha2.Build, jenkins *v1alpha2.Jenkins, parameters map[string]string, preserveStatus bool) (bool, error) {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Ensuring failed build, %+v", build))
-
-	if build.Retires < BuildRetires {
-		jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Retrying build, %+v", build))
-		build.Retires = build.Retires + 1
-		_, err := jobs.buildJob(build, parameters, jenkins)
-		if err != nil {
-			jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't retry build, %+v", build))
-			return false, err
-		}
-		return false, nil
-	}
-
-	lastFailedBuild, err := jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
-	if err != nil {
-		return false, err
-	}
-	jobs.logger.V(log.VWarn).Info(fmt.Sprintf("The retries limit was reached, build %+v, logs: %s", build, lastFailedBuild.GetConsoleOutput()))
-
-	if !preserveStatus {
-		err := jobs.removeBuildFromStatus(build, jenkins)
-		if err != nil {
-			jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
-			return false, err
-		}
-	}
-	return false, ErrorUnrecoverableBuildFailed
-}
-
-func (jobs *Jobs) ensureExpiredBuild(build v1alpha2.Build, jenkins *v1alpha2.Jenkins, preserveStatus bool) (bool, error) {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Ensuring expired build, %+v", build))
-
-	jenkinsBuild, err := jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
-	if err != nil {
-		return false, errors.WithStack(err)
-	}
-
-	_, err = jenkinsBuild.Stop()
-	if err != nil {
-		return false, errors.WithStack(err)
-	}
-
-	jenkinsBuild, err = jobs.jenkinsClient.GetBuild(build.JobName, build.Number)
-	if err != nil {
-		return false, errors.WithStack(err)
-	}
-
-	if v1alpha2.BuildStatus(jenkinsBuild.GetResult()) != v1alpha2.BuildAbortedStatus {
-		return false, ErrorAbortBuildFailed
-	}
-
-	err = jobs.updateBuildStatus(build, jenkins)
-	if err != nil {
-		return false, err
-	}
-
-	// TODO(antoniaklja) clean up k8s resources
-
-	if !preserveStatus {
-		err = jobs.removeBuildFromStatus(build, jenkins)
-		if err != nil {
-			jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't remove build from status, %+v", build))
-			return false, err
-		}
-	}
-
-	return true, nil
-}
-
-func (jobs *Jobs) removeBuildFromStatus(build v1alpha2.Build, jenkins *v1alpha2.Jenkins) error {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Removing build from status, %+v", build))
-	builds := make([]v1alpha2.Build, len(jenkins.Status.Builds))
-	for _, existingBuild := range jenkins.Status.Builds {
-		if existingBuild.JobName != build.JobName && existingBuild.Hash != build.Hash {
-			builds = append(builds, existingBuild)
-		}
-	}
-	jenkins.Status.Builds = builds
-	err := jobs.k8sClient.Update(context.TODO(), jenkins)
-	if err != nil {
-		return err // don't wrap because apierrors.IsConflict(err) won't work in jenkins_controller
-	}
-
-	return nil
-}
-
-func (jobs *Jobs) buildJob(build v1alpha2.Build, parameters map[string]string, jenkins *v1alpha2.Jenkins) (bool, error) {
-	jobs.logger.Info(fmt.Sprintf("Running job, %+v", build))
-	job, err := jobs.jenkinsClient.GetJob(build.JobName)
-	if err != nil {
-		jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't find jenkins job, %+v", build))
-		return false, errors.WithStack(err)
-	}
-	nextBuildNumber := job.GetDetails().NextBuildNumber
-
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Running build, %+v", build))
-	_, err = jobs.jenkinsClient.BuildJob(build.JobName, parameters)
-	if err != nil {
-		jobs.logger.V(log.VWarn).Info(fmt.Sprintf("Couldn't run build, %+v", build))
-		return false, errors.WithStack(err)
-	}
-
-	build.Status = v1alpha2.BuildRunningStatus
-	build.Number = nextBuildNumber
-
-	err = jobs.updateBuildStatus(build, jenkins)
-	if err != nil {
-		jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Couldn't update build status, %+v", build))
-		return false, err
-	}
-	return false, nil
-}
-
-func (jobs *Jobs) updateBuildStatus(build v1alpha2.Build, jenkins *v1alpha2.Jenkins) error {
-	jobs.logger.V(log.VDebug).Info(fmt.Sprintf("Updating build status, %+v", build))
-	// get index of existing build from status if exists
-	buildIndex := -1
-	for index, existingBuild := range jenkins.Status.Builds {
-		if build.JobName == existingBuild.JobName && build.Hash == existingBuild.Hash {
-			buildIndex = index
-		}
-	}
-
-	// update build status
-	now := metav1.Now()
-	build.LastUpdateTime = &now
-	if buildIndex >= 0 {
-		jenkins.Status.Builds[buildIndex] = build
-	} else {
-		build.CreateTime = &now
-		jenkins.Status.Builds = append(jenkins.Status.Builds, build)
-	}
-	err := jobs.k8sClient.Update(context.TODO(), jenkins)
-	if err != nil {
-		return err // don't wrap because apierrors.IsConflict(err) won't work in jenkins_controller
-	}
-
-	return nil
-}
-
-func isNotFoundError(err error) bool {
-	if err != nil {
-		return err.Error() == ErrorNotFound.Error()
-	}
-	return false
-}
@@ -1,434 +0,0 @@
-package jobs
-
-import (
-	"context"
-	"crypto/sha256"
-	"encoding/base64"
-	"fmt"
-	"testing"
-
-	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
-	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
-	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
-
-	"github.com/bndr/gojenkins"
-	"github.com/golang/mock/gomock"
-	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/kubernetes/scheme"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
-)
-
-func TestSuccessEnsureJob(t *testing.T) {
-	// given
-	ctx := context.TODO()
-	logger := logf.ZapLogger(false)
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	jobName := "Test Job"
-	hash := sha256.New()
-	hash.Write([]byte(jobName))
-	encodedHash := base64.URLEncoding.EncodeToString(hash.Sum(nil))
-
-	// when
-	jenkins := jenkinsCustomResource()
-	fakeClient := fake.NewFakeClient()
-	err := v1alpha2.SchemeBuilder.AddToScheme(scheme.Scheme)
-	assert.NoError(t, err)
-	err = fakeClient.Create(ctx, jenkins)
-	assert.NoError(t, err)
-
-	for reconcileAttempt := 1; reconcileAttempt <= 2; reconcileAttempt++ {
-		logger.Info(fmt.Sprintf("Reconcile attempt #%d", reconcileAttempt))
-		buildNumber := int64(1)
-		jenkinsClient := client.NewMockJenkins(ctrl)
-		jobs := New(jenkinsClient, fakeClient, logger)
-
-		jenkinsClient.
-			EXPECT().
-			GetJob(jobName).
-			Return(&gojenkins.Job{
-				Raw: &gojenkins.JobResponse{
-					NextBuildNumber: buildNumber,
-				},
-			}, nil).AnyTimes()
-
-		jenkinsClient.
-			EXPECT().
-			BuildJob(jobName, gomock.Any()).
-			Return(int64(0), nil).AnyTimes()
-
-		jenkinsClient.
-			EXPECT().
-			GetBuild(jobName, buildNumber).
-			Return(&gojenkins.Build{
-				Raw: &gojenkins.BuildResponse{
-					Result: string(v1alpha2.BuildSuccessStatus),
-				},
-			}, nil).AnyTimes()
-
-		done, err := jobs.EnsureBuildJob(jobName, encodedHash, nil, jenkins, true)
-		assert.NoError(t, err)
-
-		err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
-		assert.NoError(t, err)
-
-		assert.NotEmpty(t, jenkins.Status.Builds)
-		assert.Equal(t, len(jenkins.Status.Builds), 1)
-
-		build := jenkins.Status.Builds[0]
-		assert.Equal(t, build.JobName, jobName)
-		assert.Equal(t, build.Hash, encodedHash)
-		assert.Equal(t, build.Number, buildNumber)
-		assert.Equal(t, build.Retires, 0)
-		assert.NotNil(t, build.CreateTime)
-		assert.NotNil(t, build.LastUpdateTime)
-
-		// first run - build should be scheduled and status updated
-		if reconcileAttempt == 1 {
-			assert.False(t, done)
-			assert.Equal(t, build.Status, v1alpha2.BuildRunningStatus)
-		}
-
-		// second run -job should be success and status updated
-		if reconcileAttempt == 2 {
-			assert.True(t, done)
-			assert.Equal(t, build.Status, v1alpha2.BuildSuccessStatus)
-		}
-	}
-}
-
-func TestEnsureJobWithFailedBuild(t *testing.T) {
-	// given
-	ctx := context.TODO()
-	logger := logf.ZapLogger(false)
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	jobName := "Test Job"
-	hash := sha256.New()
-	hash.Write([]byte(jobName))
-	encodedHash := base64.URLEncoding.EncodeToString(hash.Sum(nil))
-
-	// when
-	jenkins := jenkinsCustomResource()
-	fakeClient := fake.NewFakeClient()
-	err := fakeClient.Create(ctx, jenkins)
-	assert.NoError(t, err)
-
-	for reconcileAttempt := 1; reconcileAttempt <= 4; reconcileAttempt++ {
-		logger.Info(fmt.Sprintf("Reconcile attempt #%d", reconcileAttempt))
-		jenkinsClient := client.NewMockJenkins(ctrl)
-		jobs := New(jenkinsClient, fakeClient, logger)
-
-		// first run - build should be scheduled and status updated
-		if reconcileAttempt == 1 {
-			jenkinsClient.
-				EXPECT().
-				GetJob(jobName).
-				Return(&gojenkins.Job{
-					Raw: &gojenkins.JobResponse{
-						NextBuildNumber: int64(1),
-					},
-				}, nil)
-
-			jenkinsClient.
-				EXPECT().
-				BuildJob(jobName, gomock.Any()).
-				Return(int64(0), nil)
-		}
-
-		// second run - build should be failure and status updated
-		if reconcileAttempt == 2 {
-			jenkinsClient.
-				EXPECT().
-				GetBuild(jobName, int64(1)).
-				Return(&gojenkins.Build{
-					Raw: &gojenkins.BuildResponse{
-						Result: string(v1alpha2.BuildFailureStatus),
-					},
-				}, nil)
-		}
-
-		// third run - build should be rescheduled and status updated
-		if reconcileAttempt == 3 {
-			jenkinsClient.
-				EXPECT().
-				GetJob(jobName).
-				Return(&gojenkins.Job{
-					Raw: &gojenkins.JobResponse{
-						NextBuildNumber: int64(2),
-					},
-				}, nil)
-
-			jenkinsClient.
-				EXPECT().
-				BuildJob(jobName, gomock.Any()).
-				Return(int64(0), nil)
-		}
-
-		// fourth run - build should be success and status updated
-		if reconcileAttempt == 4 {
-			jenkinsClient.
-				EXPECT().
-				GetBuild(jobName, int64(2)).
-				Return(&gojenkins.Build{
-					Raw: &gojenkins.BuildResponse{
-						Result: string(v1alpha2.BuildSuccessStatus),
-					},
-				}, nil)
-		}
-
-		done, errEnsureBuildJob := jobs.EnsureBuildJob(jobName, encodedHash, nil, jenkins, true)
-		assert.NoError(t, err)
-
-		err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
-		assert.NoError(t, err)
-
-		assert.NotEmpty(t, jenkins.Status.Builds)
-		assert.Equal(t, len(jenkins.Status.Builds), 1)
-
-		build := jenkins.Status.Builds[0]
-		assert.Equal(t, build.JobName, jobName)
-		assert.Equal(t, build.Hash, encodedHash)
-
-		assert.NotNil(t, build.CreateTime)
-		assert.NotNil(t, build.LastUpdateTime)
-
-		// first run - build should be scheduled and status updated
-		if reconcileAttempt == 1 {
-			assert.NoError(t, errEnsureBuildJob)
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(1))
-			assert.Equal(t, build.Status, v1alpha2.BuildRunningStatus)
-		}
-
-		// second run - build should be failure and status updated
-		if reconcileAttempt == 2 {
-			assert.Error(t, errEnsureBuildJob)
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(1))
-			assert.Equal(t, build.Status, v1alpha2.BuildFailureStatus)
-		}
-
-		// third run - build should be rescheduled and status updated
-		if reconcileAttempt == 3 {
-			assert.NoError(t, errEnsureBuildJob)
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(2))
-			assert.Equal(t, build.Status, v1alpha2.BuildRunningStatus)
-		}
-
-		// fourth run - build should be success and status updated
-		if reconcileAttempt == 4 {
-			assert.NoError(t, errEnsureBuildJob)
-			assert.True(t, done)
-			assert.Equal(t, build.Number, int64(2))
-			assert.Equal(t, build.Status, v1alpha2.BuildSuccessStatus)
-		}
-	}
-}
-
-func TestEnsureJobFailedWithMaxRetries(t *testing.T) {
-	// given
-	ctx := context.TODO()
-	logger := logf.ZapLogger(false)
-	ctrl := gomock.NewController(t)
-	defer ctrl.Finish()
-
-	buildName := "Test Job"
-	hash := sha256.New()
-	hash.Write([]byte(buildName))
-	encodedHash := base64.URLEncoding.EncodeToString(hash.Sum(nil))
-
-	// when
-	jenkins := jenkinsCustomResource()
-	fakeClient := fake.NewFakeClient()
-	err := fakeClient.Create(ctx, jenkins)
-	assert.NoError(t, err)
-
-	BuildRetires = 1 // override max build retries
-	for reconcileAttempt := 1; reconcileAttempt <= 5; reconcileAttempt++ {
-		logger.Info(fmt.Sprintf("Reconcile attempt #%d", reconcileAttempt))
-		jenkinsClient := client.NewMockJenkins(ctrl)
-		jobs := New(jenkinsClient, fakeClient, logger)
-
-		// first run - build should be scheduled and status updated
-		if reconcileAttempt == 1 {
-			jenkinsClient.
-				EXPECT().
-				GetJob(buildName).
-				Return(&gojenkins.Job{
-					Raw: &gojenkins.JobResponse{
-						NextBuildNumber: int64(1),
-					},
-				}, nil)
-
-			jenkinsClient.
-				EXPECT().
-				BuildJob(buildName, gomock.Any()).
-				Return(int64(0), nil)
-		}
-
-		// second run - build should be failure and status updated
-		if reconcileAttempt == 2 {
-			jenkinsClient.
-				EXPECT().
-				GetBuild(buildName, int64(1)).
-				Return(&gojenkins.Build{
-					Raw: &gojenkins.BuildResponse{
-						Result: string(v1alpha2.BuildFailureStatus),
-					},
-				}, nil)
-		}
-
-		// third run - build should be rescheduled and status updated
-		if reconcileAttempt == 3 {
-			jenkinsClient.
-				EXPECT().
-				GetJob(buildName).
-				Return(&gojenkins.Job{
-					Raw: &gojenkins.JobResponse{
-						NextBuildNumber: int64(2),
-					},
-				}, nil)
-
-			jenkinsClient.
-				EXPECT().
-				BuildJob(buildName, gomock.Any()).
-				Return(int64(0), nil)
-		}
-
-		// fourth run - build should be success and status updated
-		if reconcileAttempt == 4 {
-			jenkinsClient.
-				EXPECT().
-				GetBuild(buildName, int64(2)).
-				Return(&gojenkins.Build{
-					Raw: &gojenkins.BuildResponse{
-						Result: string(v1alpha2.BuildFailureStatus),
-					},
-				}, nil)
-		}
-
-		// fifth run - build should be unrecoverable failed and status updated
-		if reconcileAttempt == 5 {
-			jenkinsClient.
-				EXPECT().
-				GetBuild(buildName, int64(2)).
-				Return(&gojenkins.Build{
-					Raw: &gojenkins.BuildResponse{
-						Result: string(v1alpha2.BuildFailureStatus),
-					},
-					Jenkins: gojenkins.CreateJenkins(nil, ""),
-				}, nil)
-		}
-
-		done, errEnsureBuildJob := jobs.EnsureBuildJob(buildName, encodedHash, nil, jenkins, true)
-		assert.NoError(t, err)
-
-		err = fakeClient.Get(ctx, types.NamespacedName{Name: jenkins.Name, Namespace: jenkins.Namespace}, jenkins)
-		assert.NoError(t, err)
-
-		assert.NotEmpty(t, jenkins.Status.Builds)
-		assert.Equal(t, len(jenkins.Status.Builds), 1)
-
-		build := jenkins.Status.Builds[0]
-		assert.Equal(t, build.JobName, buildName)
-		assert.Equal(t, build.Hash, encodedHash)
-
-		assert.NotNil(t, build.CreateTime)
-		assert.NotNil(t, build.LastUpdateTime)
-
-		// first run - build should be scheduled and status updated
-		if reconcileAttempt == 1 {
-			assert.NoError(t, errEnsureBuildJob)
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(1))
-			assert.Equal(t, build.Retires, 0)
-			assert.Equal(t, build.Status, v1alpha2.BuildRunningStatus)
-		}
-
-		// second run - build should be failure and status updated
-		if reconcileAttempt == 2 {
-			assert.EqualError(t, errEnsureBuildJob, ErrorBuildFailed.Error())
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(1))
-			assert.Equal(t, build.Retires, 0)
-			assert.Equal(t, build.Status, v1alpha2.BuildFailureStatus)
-		}
-
-		// third run - build should be rescheduled and status updated
-		if reconcileAttempt == 3 {
-			assert.NoError(t, errEnsureBuildJob)
-			assert.False(t, done)
-			//assert.Equal(t, build.Retires, 1)
-			assert.Equal(t, build.Number, int64(2))
-			assert.Equal(t, build.Retires, 1)
-			assert.Equal(t, build.Status, v1alpha2.BuildRunningStatus)
-		}
-
-		// fourth run - build should be failure and status updated
-		if reconcileAttempt == 4 {
-			assert.EqualError(t, errEnsureBuildJob, ErrorBuildFailed.Error())
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(2))
-			assert.Equal(t, build.Retires, 1)
-			assert.Equal(t, build.Status, v1alpha2.BuildFailureStatus)
-		}
-
-		// fifth run - build should be unrecoverable failed and status updated
-		if reconcileAttempt == 5 {
-			assert.EqualError(t, errEnsureBuildJob, ErrorUnrecoverableBuildFailed.Error())
-			assert.False(t, done)
-			assert.Equal(t, build.Number, int64(2))
-			assert.Equal(t, build.Retires, 1)
-			assert.Equal(t, build.Status, v1alpha2.BuildFailureStatus)
-		}
-	}
-}
-
-func jenkinsCustomResource() *v1alpha2.Jenkins {
-	return &v1alpha2.Jenkins{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "jenkins",
-			Namespace: "default",
-		},
-		Spec: v1alpha2.JenkinsSpec{
-			Master: v1alpha2.JenkinsMaster{
-				Annotations: map[string]string{"test": "label"},
-				Containers: []v1alpha2.Container{
-					{
-						Name:  resources.JenkinsMasterContainerName,
-						Image: "jenkins/jenkins",
-						Resources: corev1.ResourceRequirements{
-							Requests: corev1.ResourceList{
-								corev1.ResourceCPU:    resource.MustParse("300m"),
-								corev1.ResourceMemory: resource.MustParse("500Mi"),
-							},
-							Limits: corev1.ResourceList{
-								corev1.ResourceCPU:    resource.MustParse("2"),
-								corev1.ResourceMemory: resource.MustParse("2Gi"),
-							},
-						},
-					},
-				},
-			},
-			SeedJobs: []v1alpha2.SeedJob{
-				{
-					ID:                    "jenkins-operator-e2e",
-					JenkinsCredentialType: v1alpha2.NoJenkinsCredentialCredentialType,
-					Targets:               "cicd/jobs/*.jenkins",
-					Description:           "Jenkins Operator e2e tests repository",
-					RepositoryBranch:      "master",
-					RepositoryURL:         "https://github.com/jenkinsci/kubernetes-operator.git",
-				},
-			},
-		},
-	}
-}