Extend kubernetes client, pod termination notification

parent 3fadf8bdbd
commit c95108e3ac

go.mod (1 changed line)
@@ -26,6 +26,7 @@ require (
    golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b // indirect
    golang.org/x/text v0.3.2 // indirect
    golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3 // indirect
    gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
    gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
    k8s.io/api v0.0.0-20190612125737-db0771252981
    k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad
go.sum (2 changed lines)
@@ -569,6 +569,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -11,6 +11,7 @@ import (
    "github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
    jenkinsclient "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/backuprestore"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/groovy"
@@ -40,36 +41,35 @@ const (

// ReconcileJenkinsBaseConfiguration defines values required for Jenkins base configuration
type ReconcileJenkinsBaseConfiguration struct {
    k8sClient client.Client
    configuration.Configuration
    scheme *runtime.Scheme
    logger logr.Logger
    jenkins *v1alpha2.Jenkins
    local, minikube bool
    clientSet *kubernetes.Clientset
    config *rest.Config
    notificationEvents *chan notifications.Event
}

// New create structure which takes care of base configuration
func New(client client.Client, scheme *runtime.Scheme, logger logr.Logger,
-    jenkins *v1alpha2.Jenkins, local, minikube bool, clientSet *kubernetes.Clientset, config *rest.Config,
+    jenkins *v1alpha2.Jenkins, local, minikube bool, clientSet kubernetes.Clientset, config *rest.Config,
    notificationEvents *chan notifications.Event) *ReconcileJenkinsBaseConfiguration {
    return &ReconcileJenkinsBaseConfiguration{
        k8sClient: client,
        Configuration: configuration.Configuration{
            Client: client,
            ClientSet: clientSet,
            Notifications: notificationEvents,
            Jenkins: jenkins,
        },
        scheme: scheme,
        logger: logger,
        jenkins: jenkins,
        local: local,
        minikube: minikube,
        clientSet: clientSet,
        config: config,
        notificationEvents: notificationEvents,
    }
}

// Reconcile takes care of base configuration
func (r *ReconcileJenkinsBaseConfiguration) Reconcile() (reconcile.Result, jenkinsclient.Jenkins, error) {
-    metaObject := resources.NewResourceObjectMeta(r.jenkins)
+    metaObject := resources.NewResourceObjectMeta(r.Configuration.Jenkins)

    err := r.ensureResourcesRequiredForJenkinsPod(metaObject)
    if err != nil {
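For context, this is roughly how a caller now wires the reworked constructor: the clientset is passed by value rather than as a pointer, and the notification channel travels alongside the Jenkins resource inside configuration.Configuration. A minimal sketch, assuming the base and notifications import paths from the repository layout; the wrapper function and its variable names are illustrative, not part of this diff:

package controller // illustrative package, not part of this commit

import (
    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
    // Paths below are assumed from the package layout visible in this diff.
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"
)

// newBaseReconciler sketches the updated call site: kubernetes.Clientset is
// now passed by value and the notification events channel by pointer.
func newBaseReconciler(c client.Client, scheme *runtime.Scheme, logger logr.Logger,
    jenkins *v1alpha2.Jenkins, clientSet kubernetes.Clientset, config *rest.Config,
    events *chan notifications.Event) *base.ReconcileJenkinsBaseConfiguration {
    return base.New(c, scheme, logger, jenkins, false, false, clientSet, config, events)
}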
@@ -115,7 +115,7 @@ func (r *ReconcileJenkinsBaseConfiguration) Reconcile() (reconcile.Result, jenki
    }
    if !ok {
        r.logger.Info("Some plugins have changed, restarting Jenkins")
-        return reconcile.Result{Requeue: true}, nil, r.restartJenkinsMasterPod()
+        return reconcile.Result{Requeue: true}, nil, r.Configuration.RestartJenkinsMasterPod()
    }

    result, err = r.ensureBaseConfiguration(jenkinsClient)

@@ -170,12 +170,12 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureResourcesRequiredForJenkinsPod
    }
    r.logger.V(log.VDebug).Info("Base configuration config map is present")

-    if err := r.addLabelForWatchesResources(r.jenkins.Spec.GroovyScripts.Customization); err != nil {
+    if err := r.addLabelForWatchesResources(r.Configuration.Jenkins.Spec.GroovyScripts.Customization); err != nil {
        return err
    }
    r.logger.V(log.VDebug).Info("GroovyScripts Secret and ConfigMap added watched labels")

-    if err := r.addLabelForWatchesResources(r.jenkins.Spec.ConfigurationAsCode.Customization); err != nil {
+    if err := r.addLabelForWatchesResources(r.Configuration.Jenkins.Spec.ConfigurationAsCode.Customization); err != nil {
        return err
    }
    r.logger.V(log.VDebug).Info("ConfigurationAsCode Secret and ConfigMap added watched labels")

@@ -185,11 +185,11 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureResourcesRequiredForJenkinsPod
    }
    r.logger.V(log.VDebug).Info("Service account, role and role binding are present")

-    if err := r.createService(metaObject, resources.GetJenkinsHTTPServiceName(r.jenkins), r.jenkins.Spec.Service); err != nil {
+    if err := r.createService(metaObject, resources.GetJenkinsHTTPServiceName(r.Configuration.Jenkins), r.Configuration.Jenkins.Spec.Service); err != nil {
        return err
    }
    r.logger.V(log.VDebug).Info("Jenkins HTTP Service is present")
-    if err := r.createService(metaObject, resources.GetJenkinsSlavesServiceName(r.jenkins), r.jenkins.Spec.SlaveService); err != nil {
+    if err := r.createService(metaObject, resources.GetJenkinsSlavesServiceName(r.Configuration.Jenkins), r.Configuration.Jenkins.Spec.SlaveService); err != nil {
        return err
    }
    r.logger.V(log.VDebug).Info("Jenkins slave Service is present")

@@ -212,7 +212,7 @@ func (r *ReconcileJenkinsBaseConfiguration) verifyPlugins(jenkinsClient jenkinsc
    r.logger.V(log.VDebug).Info(fmt.Sprintf("Installed plugins '%+v'", installedPlugins))

    status := true
-    allRequiredPlugins := [][]v1alpha2.Plugin{r.jenkins.Spec.Master.BasePlugins, r.jenkins.Spec.Master.Plugins}
+    allRequiredPlugins := [][]v1alpha2.Plugin{r.Configuration.Jenkins.Spec.Master.BasePlugins, r.Configuration.Jenkins.Spec.Master.Plugins}
    for _, requiredPlugins := range allRequiredPlugins {
        for _, plugin := range requiredPlugins {
            if found, ok := isPluginInstalled(allPluginsInJenkins, plugin); !ok {

@@ -253,10 +253,10 @@ func isPluginInstalled(plugins *gojenkins.Plugins, requiredPlugin v1alpha2.Plugi

func (r *ReconcileJenkinsBaseConfiguration) createOperatorCredentialsSecret(meta metav1.ObjectMeta) error {
    found := &corev1.Secret{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.jenkins), Namespace: r.jenkins.ObjectMeta.Namespace}, found)
+    err := r.Configuration.Client.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.Configuration.Jenkins), Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, found)

    if err != nil && apierrors.IsNotFound(err) {
-        return stackerr.WithStack(r.createResource(resources.NewOperatorCredentialsSecret(meta, r.jenkins)))
+        return stackerr.WithStack(r.createResource(resources.NewOperatorCredentialsSecret(meta, r.Configuration.Jenkins)))
    } else if err != nil && !apierrors.IsNotFound(err) {
        return stackerr.WithStack(err)
    }

@@ -266,11 +266,11 @@ func (r *ReconcileJenkinsBaseConfiguration) createOperatorCredentialsSecret(meta
        return nil
    }

-    return stackerr.WithStack(r.updateResource(resources.NewOperatorCredentialsSecret(meta, r.jenkins)))
+    return stackerr.WithStack(r.updateResource(resources.NewOperatorCredentialsSecret(meta, r.Configuration.Jenkins)))
}

func (r *ReconcileJenkinsBaseConfiguration) createScriptsConfigMap(meta metav1.ObjectMeta) error {
-    configMap, err := resources.NewScriptsConfigMap(meta, r.jenkins)
+    configMap, err := resources.NewScriptsConfigMap(meta, r.Configuration.Jenkins)
    if err != nil {
        return err
    }

@@ -278,7 +278,7 @@ func (r *ReconcileJenkinsBaseConfiguration) createScriptsConfigMap(meta metav1.O
}

func (r *ReconcileJenkinsBaseConfiguration) createInitConfigurationConfigMap(meta metav1.ObjectMeta) error {
-    configMap, err := resources.NewInitConfigurationConfigMap(meta, r.jenkins)
+    configMap, err := resources.NewInitConfigurationConfigMap(meta, r.Configuration.Jenkins)
    if err != nil {
        return err
    }

@@ -286,16 +286,16 @@ func (r *ReconcileJenkinsBaseConfiguration) createInitConfigurationConfigMap(met
}

func (r *ReconcileJenkinsBaseConfiguration) createBaseConfigurationConfigMap(meta metav1.ObjectMeta) error {
-    configMap := resources.NewBaseConfigurationConfigMap(meta, r.jenkins)
+    configMap := resources.NewBaseConfigurationConfigMap(meta, r.Configuration.Jenkins)
    return stackerr.WithStack(r.createOrUpdateResource(configMap))
}

func (r *ReconcileJenkinsBaseConfiguration) addLabelForWatchesResources(customization v1alpha2.Customization) error {
-    labelsForWatchedResources := resources.BuildLabelsForWatchedResources(*r.jenkins)
+    labelsForWatchedResources := resources.BuildLabelsForWatchedResources(*r.Configuration.Jenkins)

    if len(customization.Secret.Name) > 0 {
        secret := &corev1.Secret{}
-        err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: customization.Secret.Name, Namespace: r.jenkins.Namespace}, secret)
+        err := r.Client.Get(context.TODO(), types.NamespacedName{Name: customization.Secret.Name, Namespace: r.Configuration.Jenkins.Namespace}, secret)
        if err != nil {
            return stackerr.WithStack(err)
        }

@@ -308,15 +308,15 @@ func (r *ReconcileJenkinsBaseConfiguration) addLabelForWatchesResources(customiz
            secret.ObjectMeta.Labels[key] = value
        }

-        if err = r.k8sClient.Update(context.TODO(), secret); err != nil {
-            return stackerr.WithStack(r.k8sClient.Update(context.TODO(), secret))
+        if err = r.Client.Update(context.TODO(), secret); err != nil {
+            return stackerr.WithStack(r.Client.Update(context.TODO(), secret))
        }
        }
    }

    for _, configMapRef := range customization.Configurations {
        configMap := &corev1.ConfigMap{}
-        err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: r.jenkins.Namespace}, configMap)
+        err := r.Client.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: r.Configuration.Jenkins.Namespace}, configMap)
        if err != nil {
            return stackerr.WithStack(err)
        }

@@ -329,8 +329,8 @@ func (r *ReconcileJenkinsBaseConfiguration) addLabelForWatchesResources(customiz
            configMap.ObjectMeta.Labels[key] = value
        }

-        if err = r.k8sClient.Update(context.TODO(), configMap); err != nil {
-            return stackerr.WithStack(r.k8sClient.Update(context.TODO(), configMap))
+        if err = r.Client.Update(context.TODO(), configMap); err != nil {
+            return stackerr.WithStack(r.Client.Update(context.TODO(), configMap))
        }
    }
    }

@@ -362,7 +362,7 @@ func (r *ReconcileJenkinsBaseConfiguration) createRBAC(meta metav1.ObjectMeta) e

func (r *ReconcileJenkinsBaseConfiguration) createService(meta metav1.ObjectMeta, name string, config v1alpha2.Service) error {
    service := corev1.Service{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: meta.Namespace}, &service)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: meta.Namespace}, &service)
    if err != nil && errors.IsNotFound(err) {
        service = resources.UpdateService(corev1.Service{
            ObjectMeta: metav1.ObjectMeta{

@@ -386,9 +386,9 @@ func (r *ReconcileJenkinsBaseConfiguration) createService(meta metav1.ObjectMeta
}

func (r *ReconcileJenkinsBaseConfiguration) getJenkinsMasterPod() (*corev1.Pod, error) {
-    jenkinsMasterPodName := resources.GetJenkinsMasterPodName(*r.jenkins)
+    jenkinsMasterPodName := resources.GetJenkinsMasterPodName(*r.Configuration.Jenkins)
    currentJenkinsMasterPod := &corev1.Pod{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: jenkinsMasterPodName, Namespace: r.jenkins.Namespace}, currentJenkinsMasterPod)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: jenkinsMasterPodName, Namespace: r.Configuration.Jenkins.Namespace}, currentJenkinsMasterPod)
    if err != nil {
        return nil, err // don't wrap error
    }
@@ -404,14 +404,14 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsMasterPod(meta metav1.O
    // Check if this Pod already exists
    currentJenkinsMasterPod, err := r.getJenkinsMasterPod()
    if err != nil && errors.IsNotFound(err) {
-        jenkinsMasterPod := resources.NewJenkinsMasterPod(meta, r.jenkins)
+        jenkinsMasterPod := resources.NewJenkinsMasterPod(meta, r.Configuration.Jenkins)
        if !reflect.DeepEqual(jenkinsMasterPod.Spec.Containers[0].Command, resources.GetJenkinsMasterContainerBaseCommand()) {
            r.logger.Info(fmt.Sprintf("spec.master.containers[%s].command has been overridden make sure the command looks like: '%v', otherwise the operator won't configure default user and install plugins",
                resources.JenkinsMasterContainerName, []string{"bash", "-c", fmt.Sprintf("%s/%s && <custom-command-here> && /sbin/tini -s -- /usr/local/bin/jenkins.sh",
                    resources.JenkinsScriptsVolumePath, resources.InitScriptName)}))
        }
-        *r.notificationEvents <- notifications.Event{
-            Jenkins: *r.jenkins,
+        *r.Notifications <- notifications.Event{
+            Jenkins: *r.Configuration.Jenkins,
            Phase: notifications.PhaseBase,
            LogLevel: v1alpha2.NotificationLogLevelInfo,
            Message: "Creating a new Jenkins Master Pod",
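This is the notification path named in the commit title: instead of only logging, the reconciler pushes an Event into the shared channel when it creates (and, further below, terminates) the master pod. The consumer is not part of this diff; as a rough illustration of the contract it implies — only the fields used above are relied on, the package path and function are assumptions:

package notificationsdemo // illustrative only, not part of this commit

import (
    "fmt"

    "github.com/go-logr/logr"

    // Path assumed from the operator's package layout; the diff only shows the type name.
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"
)

// listenForEvents drains the channel that ensureJenkinsMasterPod writes to and
// turns each event into a log line; the real operator presumably fans events
// out to its configured notification services instead.
func listenForEvents(events chan notifications.Event, logger logr.Logger) {
    for event := range events {
        logger.Info(fmt.Sprintf("notification: jenkins=%s phase=%v level=%v message=%q",
            event.Jenkins.Name, event.Phase, event.LogLevel, event.Message))
    }
}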
@@ -422,13 +422,13 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsMasterPod(meta metav1.O
            return reconcile.Result{}, stackerr.WithStack(err)
        }
        now := metav1.Now()
-        r.jenkins.Status = v1alpha2.JenkinsStatus{
+        r.Configuration.Jenkins.Status = v1alpha2.JenkinsStatus{
            ProvisionStartTime: &now,
-            LastBackup: r.jenkins.Status.LastBackup,
-            PendingBackup: r.jenkins.Status.LastBackup,
+            LastBackup: r.Configuration.Jenkins.Status.LastBackup,
+            PendingBackup: r.Configuration.Jenkins.Status.LastBackup,
            UserAndPasswordHash: userAndPasswordHash,
        }
-        err = r.k8sClient.Update(context.TODO(), r.jenkins)
+        err = r.Client.Update(context.TODO(), r.Configuration.Jenkins)
        if err != nil {
            return reconcile.Result{Requeue: true}, err
        }

@@ -437,14 +437,14 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsMasterPod(meta metav1.O
        return reconcile.Result{}, stackerr.WithStack(err)
    }

-    if currentJenkinsMasterPod != nil && isPodTerminating(*currentJenkinsMasterPod) && r.jenkins.Status.UserConfigurationCompletedTime != nil {
-        backupAndRestore := backuprestore.New(r.k8sClient, *r.clientSet, r.logger, r.jenkins, *r.config)
+    if currentJenkinsMasterPod != nil && isPodTerminating(*currentJenkinsMasterPod) && r.Configuration.Jenkins.Status.UserConfigurationCompletedTime != nil {
+        backupAndRestore := backuprestore.New(r.Client, r.ClientSet, r.logger, r.Configuration.Jenkins, *r.config)
        backupAndRestore.StopBackupTrigger()
-        if r.jenkins.Spec.Backup.MakeBackupBeforePodDeletion {
-            if r.jenkins.Status.LastBackup == r.jenkins.Status.PendingBackup && !r.jenkins.Status.BackupDoneBeforePodDeletion {
-                r.jenkins.Status.PendingBackup = r.jenkins.Status.PendingBackup + 1
-                r.jenkins.Status.BackupDoneBeforePodDeletion = true
-                err = r.k8sClient.Update(context.TODO(), r.jenkins)
+        if r.Configuration.Jenkins.Spec.Backup.MakeBackupBeforePodDeletion {
+            if r.Configuration.Jenkins.Status.LastBackup == r.Configuration.Jenkins.Status.PendingBackup && !r.Configuration.Jenkins.Status.BackupDoneBeforePodDeletion {
+                r.Configuration.Jenkins.Status.PendingBackup = r.Configuration.Jenkins.Status.PendingBackup + 1
+                r.Configuration.Jenkins.Status.BackupDoneBeforePodDeletion = true
+                err = r.Client.Update(context.TODO(), r.Configuration.Jenkins)
                if err != nil {
                    return reconcile.Result{}, err
                }
@@ -455,8 +455,14 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsMasterPod(meta metav1.O
        }
        return reconcile.Result{Requeue: true}, nil
    }
-    if currentJenkinsMasterPod != nil && r.isRecreatePodNeeded(*currentJenkinsMasterPod, userAndPasswordHash) {
-        return reconcile.Result{Requeue: true}, r.restartJenkinsMasterPod()
+
+    if currentJenkinsMasterPod == nil {
+        return reconcile.Result{Requeue: true}, nil
+    }
+
+    messages := r.isRecreatePodNeeded(*currentJenkinsMasterPod, userAndPasswordHash)
+    if hasMessages := len(messages) > 0; hasMessages {
+        return reconcile.Result{Requeue: true}, r.Configuration.RestartJenkinsMasterPod()
    }

    return reconcile.Result{}, nil
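The recreation check now returns the list of reasons instead of a bare boolean, so the caller decides what to do with them. Beyond triggering the restart shown above, a caller could also surface each reason through the notification channel; a sketch of that idea follows — whether the operator actually forwards the reasons this way is not shown in this diff:

// Sketch only: forwarding the reasons returned by isRecreatePodNeeded as
// notification events before restarting the pod. All identifiers used here
// (Notifications, PhaseBase, NotificationLogLevelInfo, RestartJenkinsMasterPod)
// appear elsewhere in this commit; the loop itself is illustrative.
messages := r.isRecreatePodNeeded(*currentJenkinsMasterPod, userAndPasswordHash)
for _, message := range messages {
    r.logger.Info(message)
    *r.Notifications <- notifications.Event{
        Jenkins:  *r.Configuration.Jenkins,
        Phase:    notifications.PhaseBase,
        LogLevel: v1alpha2.NotificationLogLevelInfo,
        Message:  message,
    }
}
if len(messages) > 0 {
    return reconcile.Result{Requeue: true}, r.Configuration.RestartJenkinsMasterPod()
}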
@@ -464,7 +470,7 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsMasterPod(meta metav1.O

func (r *ReconcileJenkinsBaseConfiguration) calculateUserAndPasswordHash() (string, error) {
    credentialsSecret := &corev1.Secret{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.jenkins), Namespace: r.jenkins.ObjectMeta.Namespace}, credentialsSecret)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.Configuration.Jenkins), Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, credentialsSecret)
    if err != nil {
        return "", stackerr.WithStack(err)
    }
@@ -479,75 +485,64 @@ func isPodTerminating(pod corev1.Pod) bool {
    return pod.ObjectMeta.DeletionTimestamp != nil
}

-func (r *ReconcileJenkinsBaseConfiguration) isRecreatePodNeeded(currentJenkinsMasterPod corev1.Pod, userAndPasswordHash string) bool {
-    if userAndPasswordHash != r.jenkins.Status.UserAndPasswordHash {
-        r.logger.Info("User or password have changed, recreating pod")
-        return true
+func (r *ReconcileJenkinsBaseConfiguration) isRecreatePodNeeded(currentJenkinsMasterPod corev1.Pod, userAndPasswordHash string) []string {
+    var messages []string
+    if userAndPasswordHash != r.Configuration.Jenkins.Status.UserAndPasswordHash {
+        messages = append(messages, "User or password have changed, recreating pod")
    }

-    if r.jenkins.Spec.Restore.RecoveryOnce != 0 && r.jenkins.Status.RestoredBackup != 0 {
-        r.logger.Info(fmt.Sprintf("spec.restore.recoveryOnce is set, recreating pod"))
-        return true
+    if r.Configuration.Jenkins.Spec.Restore.RecoveryOnce != 0 && r.Configuration.Jenkins.Status.RestoredBackup != 0 {
+        messages = append(messages, "spec.restore.recoveryOnce is set, recreating pod")
    }

-    if version.Version != r.jenkins.Status.OperatorVersion {
-        r.logger.Info(fmt.Sprintf("Jenkins Operator version has changed, actual '%+v' new '%+v', recreating pod",
-            r.jenkins.Status.OperatorVersion, version.Version))
-        return true
+    if version.Version != r.Configuration.Jenkins.Status.OperatorVersion {
+        messages = append(messages, fmt.Sprintf("Jenkins Operator version has changed, actual '%+v' new '%+v', recreating pod",
+            r.Configuration.Jenkins.Status.OperatorVersion, version.Version))
    }

    if currentJenkinsMasterPod.Status.Phase == corev1.PodFailed ||
        currentJenkinsMasterPod.Status.Phase == corev1.PodSucceeded ||
        currentJenkinsMasterPod.Status.Phase == corev1.PodUnknown {
-        r.logger.Info(fmt.Sprintf("Invalid Jenkins pod phase '%+v', recreating pod", currentJenkinsMasterPod.Status))
-        return true
+        messages = append(messages, fmt.Sprintf("Invalid Jenkins pod phase '%+v', recreating pod", currentJenkinsMasterPod.Status))
    }

-    if !reflect.DeepEqual(r.jenkins.Spec.Master.SecurityContext, currentJenkinsMasterPod.Spec.SecurityContext) {
-        r.logger.Info(fmt.Sprintf("Jenkins pod security context has changed, actual '%+v' required '%+v', recreating pod",
-            currentJenkinsMasterPod.Spec.SecurityContext, r.jenkins.Spec.Master.SecurityContext))
-        return true
+    if !reflect.DeepEqual(r.Configuration.Jenkins.Spec.Master.SecurityContext, currentJenkinsMasterPod.Spec.SecurityContext) {
+        messages = append(messages, fmt.Sprintf("Jenkins pod security context has changed, actual '%+v' required '%+v', recreating pod",
+            currentJenkinsMasterPod.Spec.SecurityContext, r.Configuration.Jenkins.Spec.Master.SecurityContext))
    }

-    if !reflect.DeepEqual(r.jenkins.Spec.Master.ImagePullSecrets, currentJenkinsMasterPod.Spec.ImagePullSecrets) {
-        r.logger.Info(fmt.Sprintf("Jenkins Pod ImagePullSecrets has changed, actual '%+v' required '%+v', recreating pod",
-            currentJenkinsMasterPod.Spec.ImagePullSecrets, r.jenkins.Spec.Master.ImagePullSecrets))
-        return true
+    if !reflect.DeepEqual(r.Configuration.Jenkins.Spec.Master.ImagePullSecrets, currentJenkinsMasterPod.Spec.ImagePullSecrets) {
+        messages = append(messages, fmt.Sprintf("Jenkins Pod ImagePullSecrets has changed, actual '%+v' required '%+v', recreating pod",
+            currentJenkinsMasterPod.Spec.ImagePullSecrets, r.Configuration.Jenkins.Spec.Master.ImagePullSecrets))
    }

-    if !reflect.DeepEqual(r.jenkins.Spec.Master.NodeSelector, currentJenkinsMasterPod.Spec.NodeSelector) {
-        r.logger.Info(fmt.Sprintf("Jenkins pod node selector has changed, actual '%+v' required '%+v', recreating pod",
-            currentJenkinsMasterPod.Spec.NodeSelector, r.jenkins.Spec.Master.NodeSelector))
-        return true
+    if !reflect.DeepEqual(r.Configuration.Jenkins.Spec.Master.NodeSelector, currentJenkinsMasterPod.Spec.NodeSelector) {
+        messages = append(messages, fmt.Sprintf("Jenkins pod node selector has changed, actual '%+v' required '%+v', recreating pod",
+            currentJenkinsMasterPod.Spec.NodeSelector, r.Configuration.Jenkins.Spec.Master.NodeSelector))
    }

-    if len(r.jenkins.Spec.Master.Annotations) > 0 &&
-        !reflect.DeepEqual(r.jenkins.Spec.Master.Annotations, currentJenkinsMasterPod.ObjectMeta.Annotations) {
-        r.logger.Info(fmt.Sprintf("Jenkins pod annotations have changed to '%+v', recreating pod", r.jenkins.Spec.Master.Annotations))
-        return true
+    if len(r.Configuration.Jenkins.Spec.Master.Annotations) > 0 &&
+        !reflect.DeepEqual(r.Configuration.Jenkins.Spec.Master.Annotations, currentJenkinsMasterPod.ObjectMeta.Annotations) {
+        messages = append(messages, fmt.Sprintf("Jenkins pod annotations have changed to '%+v', recreating pod", r.Configuration.Jenkins.Spec.Master.Annotations))
    }

    if !r.compareVolumes(currentJenkinsMasterPod) {
-        r.logger.Info(fmt.Sprintf("Jenkins pod volumes have changed, actual '%v' required '%v', recreating pod",
-            currentJenkinsMasterPod.Spec.Volumes, r.jenkins.Spec.Master.Volumes))
-        return true
+        messages = append(messages, fmt.Sprintf("Jenkins pod volumes have changed, actual '%v' required '%v', recreating pod",
+            currentJenkinsMasterPod.Spec.Volumes, r.Configuration.Jenkins.Spec.Master.Volumes))
    }

-    if len(r.jenkins.Spec.Master.Containers) != len(currentJenkinsMasterPod.Spec.Containers) {
-        r.logger.Info(fmt.Sprintf("Jenkins amount of containers has changed to '%+v', recreating pod", len(r.jenkins.Spec.Master.Containers)))
-        return true
+    if len(r.Configuration.Jenkins.Spec.Master.Containers) != len(currentJenkinsMasterPod.Spec.Containers) {
+        messages = append(messages, fmt.Sprintf("Jenkins amount of containers has changed to '%+v', recreating pod", len(r.Configuration.Jenkins.Spec.Master.Containers)))
    }

    for _, actualContainer := range currentJenkinsMasterPod.Spec.Containers {
        if actualContainer.Name == resources.JenkinsMasterContainerName {
-            if changed := r.compareContainers(resources.NewJenkinsMasterContainer(r.jenkins), actualContainer); changed {
-                return true
-            }
+            messages = append(messages, r.compareContainers(resources.NewJenkinsMasterContainer(r.Configuration.Jenkins), actualContainer)...)
            continue
        }

        var expectedContainer *corev1.Container
-        for _, jenkinsContainer := range r.jenkins.Spec.Master.Containers {
+        for _, jenkinsContainer := range r.Configuration.Jenkins.Spec.Master.Containers {
            if jenkinsContainer.Name == actualContainer.Name {
                tmp := resources.ConvertJenkinsContainerToKubernetesContainer(jenkinsContainer)
                expectedContainer = &tmp
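Because the check now reports its reasons, it becomes easy to unit-test without inspecting log output. A rough in-package sketch in the style of the tests touched later in this commit — the test name and the specific expectation are illustrative, not part of the change:

// Sketch: lives in the same package as isRecreatePodNeeded; a differing
// user/password hash alone should already yield at least one reason.
func TestIsRecreatePodNeededReportsReasons(t *testing.T) {
    jenkins := &v1alpha2.Jenkins{}
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
        Configuration: configuration.Configuration{
            Jenkins: jenkins,
        },
    }
    pod := corev1.Pod{
        Spec: corev1.PodSpec{
            Volumes: resources.GetJenkinsMasterPodBaseVolumes(jenkins),
        },
    }

    messages := r.isRecreatePodNeeded(pod, "changed-hash")

    assert.NotEmpty(t, messages)
}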
@@ -555,77 +550,76 @@ func (r *ReconcileJenkinsBaseConfiguration) isRecreatePodNeeded(currentJenkinsMa
        }

        if expectedContainer == nil {
-            r.logger.Info(fmt.Sprintf("Container '%+v' not found in pod, recreating pod", actualContainer))
-            return true
+            messages = append(messages, fmt.Sprintf("Container '%+v' not found in pod, recreating pod", actualContainer))
        }

-        if changed := r.compareContainers(*expectedContainer, actualContainer); changed {
-            return true
-        }
+        messages = append(messages, r.compareContainers(*expectedContainer, actualContainer)...)
    }

-    return false
+    return messages
}

-func (r *ReconcileJenkinsBaseConfiguration) compareContainers(expected corev1.Container, actual corev1.Container) bool {
+func (r *ReconcileJenkinsBaseConfiguration) compareContainers(expected corev1.Container, actual corev1.Container) []string {
+    var messages []string

    if !reflect.DeepEqual(expected.Args, actual.Args) {
-        r.logger.Info(fmt.Sprintf("Arguments have changed to '%+v' in container '%s', recreating pod", expected.Args, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Arguments have changed to '%+v' in container '%s', recreating pod", expected.Args, expected.Name))
    }
    if !reflect.DeepEqual(expected.Command, actual.Command) {
-        r.logger.Info(fmt.Sprintf("Command has changed to '%+v' in container '%s', recreating pod", expected.Command, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Command has changed to '%+v' in container '%s', recreating pod", expected.Command, expected.Name))
    }
    if !compareEnv(expected.Env, actual.Env) {
-        r.logger.Info(fmt.Sprintf("Env has changed to '%+v' in container '%s', recreating pod", expected.Env, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Env has changed to '%+v' in container '%s', recreating pod", expected.Env, expected.Name))
    }
    if !reflect.DeepEqual(expected.EnvFrom, actual.EnvFrom) {
-        r.logger.Info(fmt.Sprintf("EnvFrom has changed to '%+v' in container '%s', recreating pod", expected.EnvFrom, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("EnvFrom has changed to '%+v' in container '%s', recreating pod", expected.EnvFrom, expected.Name))
    }
    if !reflect.DeepEqual(expected.Image, actual.Image) {
-        r.logger.Info(fmt.Sprintf("Image has changed to '%+v' in container '%s', recreating pod", expected.Image, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Image has changed to '%+v' in container '%s', recreating pod", expected.Image, expected.Name))
    }
    if !reflect.DeepEqual(expected.ImagePullPolicy, actual.ImagePullPolicy) {
-        r.logger.Info(fmt.Sprintf("Image pull policy has changed to '%+v' in container '%s', recreating pod", expected.ImagePullPolicy, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Image pull policy has changed to '%+v' in container '%s', recreating pod", expected.ImagePullPolicy, expected.Name))
    }
    if !reflect.DeepEqual(expected.Lifecycle, actual.Lifecycle) {
-        r.logger.Info(fmt.Sprintf("Lifecycle has changed to '%+v' in container '%s', recreating pod", expected.Lifecycle, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Lifecycle has changed to '%+v' in container '%s', recreating pod", expected.Lifecycle, expected.Name))
    }
    if !reflect.DeepEqual(expected.LivenessProbe, actual.LivenessProbe) {
-        r.logger.Info(fmt.Sprintf("Liveness probe has changed to '%+v' in container '%s', recreating pod", expected.LivenessProbe, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Liveness probe has changed to '%+v' in container '%s', recreating pod", expected.LivenessProbe, expected.Name))
    }
    if !reflect.DeepEqual(expected.Ports, actual.Ports) {
-        r.logger.Info(fmt.Sprintf("Ports have changed to '%+v' in container '%s', recreating pod", expected.Ports, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Ports have changed to '%+v' in container '%s', recreating pod", expected.Ports, expected.Name))
    }
    if !reflect.DeepEqual(expected.ReadinessProbe, actual.ReadinessProbe) {
-        r.logger.Info(fmt.Sprintf("Readiness probe has changed to '%+v' in container '%s', recreating pod", expected.ReadinessProbe, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Readiness probe has changed to '%+v' in container '%s', recreating pod", expected.ReadinessProbe, expected.Name))
    }
    if !reflect.DeepEqual(expected.Resources, actual.Resources) {
-        r.logger.Info(fmt.Sprintf("Resources have changed to '%+v' in container '%s', recreating pod", expected.Resources, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Resources have changed to '%+v' in container '%s', recreating pod", expected.Resources, expected.Name))
    }
    if !reflect.DeepEqual(expected.SecurityContext, actual.SecurityContext) {
-        r.logger.Info(fmt.Sprintf("Security context has changed to '%+v' in container '%s', recreating pod", expected.SecurityContext, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Security context has changed to '%+v' in container '%s', recreating pod", expected.SecurityContext, expected.Name))
    }
    if !reflect.DeepEqual(expected.WorkingDir, actual.WorkingDir) {
-        r.logger.Info(fmt.Sprintf("Working directory has changed to '%+v' in container '%s', recreating pod", expected.WorkingDir, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Working directory has changed to '%+v' in container '%s', recreating pod", expected.WorkingDir, expected.Name))
    }
    if !CompareContainerVolumeMounts(expected, actual) {
-        r.logger.Info(fmt.Sprintf("Volume mounts have changed to '%+v' in container '%s', recreating pod", expected.VolumeMounts, expected.Name))
-        return true
+        messages = append(messages, fmt.Sprintf("Volume mounts have changed to '%+v' in container '%s', recreating pod", expected.VolumeMounts, expected.Name))
    }

-    return false
+    return messages
}

func compareEnv(expected, actual []corev1.EnvVar) bool {
@@ -662,20 +656,11 @@ func (r *ReconcileJenkinsBaseConfiguration) compareVolumes(actualPod corev1.Pod)
    }

    return reflect.DeepEqual(
-        append(resources.GetJenkinsMasterPodBaseVolumes(r.jenkins), r.jenkins.Spec.Master.Volumes...),
+        append(resources.GetJenkinsMasterPodBaseVolumes(r.Configuration.Jenkins), r.Configuration.Jenkins.Spec.Master.Volumes...),
        withoutServiceAccount,
    )
}

-func (r *ReconcileJenkinsBaseConfiguration) restartJenkinsMasterPod() error {
-    currentJenkinsMasterPod, err := r.getJenkinsMasterPod()
-    if err != nil {
-        return err
-    }
-    r.logger.Info(fmt.Sprintf("Terminating Jenkins Master Pod %s/%s", currentJenkinsMasterPod.Namespace, currentJenkinsMasterPod.Name))
-    return stackerr.WithStack(r.k8sClient.Delete(context.TODO(), currentJenkinsMasterPod))
-}
-
func (r *ReconcileJenkinsBaseConfiguration) detectJenkinsMasterPodStartingIssues(meta metav1.ObjectMeta) (stopReconcileLoop bool, err error) {
    jenkinsMasterPod, err := r.getJenkinsMasterPod()
    if err != nil {
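The local restartJenkinsMasterPod helper disappears here because restarts now go through the shared configuration.Configuration (note the r.Configuration.RestartJenkinsMasterPod() calls above). The relocated method's body is not part of this diff; based on the code removed here and the fields Configuration is given in New, it plausibly looks something like the following sketch — package name, import alias and exact behaviour are assumptions:

package configuration // assumed package for the shared Configuration type

import (
    "context"

    stackerr "github.com/pkg/errors" // alias assumed to match the operator's stackerr usage
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"

    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
)

// RestartJenkinsMasterPod is a sketch of the relocated helper: look up the
// current master pod via the Client and Jenkins fields set in base.New, then
// delete it so the reconcile loop recreates it.
func (c *Configuration) RestartJenkinsMasterPod() error {
    currentJenkinsMasterPod := &corev1.Pod{}
    podName := resources.GetJenkinsMasterPodName(*c.Jenkins)
    err := c.Client.Get(context.TODO(), types.NamespacedName{Name: podName, Namespace: c.Jenkins.Namespace}, currentJenkinsMasterPod)
    if err != nil {
        return err // don't wrap error
    }
    return stackerr.WithStack(c.Client.Delete(context.TODO(), currentJenkinsMasterPod))
}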
@@ -683,11 +668,11 @@ func (r *ReconcileJenkinsBaseConfiguration) detectJenkinsMasterPodStartingIssues
    }

    if jenkinsMasterPod.Status.Phase == corev1.PodPending {
-        timeout := r.jenkins.Status.ProvisionStartTime.Add(time.Minute * 2).UTC()
+        timeout := r.Configuration.Jenkins.Status.ProvisionStartTime.Add(time.Minute * 2).UTC()
        now := time.Now().UTC()
        if now.After(timeout) {
            events := &corev1.EventList{}
-            err = r.k8sClient.List(context.TODO(), &client.ListOptions{Namespace: r.jenkins.Namespace}, events)
+            err = r.Client.List(context.TODO(), &client.ListOptions{Namespace: r.Configuration.Jenkins.Namespace}, events)
            if err != nil {
                return false, stackerr.WithStack(err)
            }

@@ -709,7 +694,7 @@ func (r *ReconcileJenkinsBaseConfiguration) detectJenkinsMasterPodStartingIssues
func (r *ReconcileJenkinsBaseConfiguration) filterEvents(source corev1.EventList, jenkinsMasterPod corev1.Pod) []string {
    events := []string{}
    for _, event := range source.Items {
-        if r.jenkins.Status.ProvisionStartTime.UTC().After(event.LastTimestamp.UTC()) {
+        if r.Configuration.Jenkins.Status.ProvisionStartTime.UTC().After(event.LastTimestamp.UTC()) {
            continue
        }
        if event.Type == corev1.EventTypeNormal {

@@ -744,7 +729,7 @@ func (r *ReconcileJenkinsBaseConfiguration) waitForJenkins(meta metav1.ObjectMet
    for _, containerStatus := range jenkinsMasterPod.Status.ContainerStatuses {
        if containerStatus.State.Terminated != nil {
            r.logger.Info(fmt.Sprintf("Container '%s' is terminated, status '%+v', recreating pod", containerStatus.Name, containerStatus))
-            return reconcile.Result{Requeue: true}, r.restartJenkinsMasterPod()
+            return reconcile.Result{Requeue: true}, r.Configuration.RestartJenkinsMasterPod()
        }
        if !containerStatus.Ready {
            r.logger.V(log.VDebug).Info(fmt.Sprintf("Container '%s' not ready, readiness probe failed", containerStatus.Name))

@@ -761,9 +746,9 @@ func (r *ReconcileJenkinsBaseConfiguration) waitForJenkins(meta metav1.ObjectMet

func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsClient(meta metav1.ObjectMeta) (jenkinsclient.Jenkins, error) {
    jenkinsURL, err := jenkinsclient.BuildJenkinsAPIUrl(
-        r.jenkins.ObjectMeta.Namespace, resources.GetJenkinsHTTPServiceName(r.jenkins), r.jenkins.Spec.Service.Port, r.local, r.minikube)
+        r.Configuration.Jenkins.ObjectMeta.Namespace, resources.GetJenkinsHTTPServiceName(r.Configuration.Jenkins), r.Configuration.Jenkins.Spec.Service.Port, r.local, r.minikube)

-    if prefix, ok := GetJenkinsOpts(*r.jenkins)["prefix"]; ok {
+    if prefix, ok := GetJenkinsOpts(*r.Configuration.Jenkins)["prefix"]; ok {
        jenkinsURL = jenkinsURL + prefix
    }

@@ -773,7 +758,7 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureJenkinsClient(meta metav1.Obje
    r.logger.V(log.VDebug).Info(fmt.Sprintf("Jenkins API URL '%s'", jenkinsURL))

    credentialsSecret := &corev1.Secret{}
-    err = r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.jenkins), Namespace: r.jenkins.ObjectMeta.Namespace}, credentialsSecret)
+    err = r.Client.Get(context.TODO(), types.NamespacedName{Name: resources.GetOperatorCredentialsSecretName(r.Configuration.Jenkins), Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, credentialsSecret)
    if err != nil {
        return nil, stackerr.WithStack(err)
    }

@@ -829,11 +814,11 @@ func (r *ReconcileJenkinsBaseConfiguration) ensureBaseConfiguration(jenkinsClien
    customization := v1alpha2.GroovyScripts{
        Customization: v1alpha2.Customization{
            Secret: v1alpha2.SecretRef{Name: ""},
-            Configurations: []v1alpha2.ConfigMapRef{{Name: resources.GetBaseConfigurationConfigMapName(r.jenkins)}},
+            Configurations: []v1alpha2.ConfigMapRef{{Name: resources.GetBaseConfigurationConfigMapName(r.Configuration.Jenkins)}},
        },
    }

-    groovyClient := groovy.New(jenkinsClient, r.k8sClient, r.logger, r.jenkins, "base-groovy", customization.Customization)
+    groovyClient := groovy.New(jenkinsClient, r.Client, r.logger, r.Configuration.Jenkins, "base-groovy", customization.Customization)

    requeue, err := groovyClient.Ensure(func(name string) bool {
        return strings.HasSuffix(name, ".groovy")
@@ -5,6 +5,7 @@ import (

    "github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"
    "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
    "github.com/jenkinsci/kubernetes-operator/pkg/log"

@@ -12,6 +13,7 @@ import (
    "github.com/golang/mock/gomock"
    "github.com/stretchr/testify/assert"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
)

func TestGetJenkinsOpts(t *testing.T) {

@@ -233,7 +235,7 @@ func TestCompareVolumes(t *testing.T) {
            Volumes: resources.GetJenkinsMasterPodBaseVolumes(jenkins),
        },
    }
-    reconciler := New(nil, nil, nil, jenkins, false, false, nil, nil, nil)
+    reconciler := New(nil, nil, nil, jenkins, false, false, kubernetes.Clientset{}, nil, nil)

    got := reconciler.compareVolumes(pod)

@@ -257,7 +259,7 @@ func TestCompareVolumes(t *testing.T) {
            Volumes: resources.GetJenkinsMasterPodBaseVolumes(jenkins),
        },
    }
-    reconciler := New(nil, nil, nil, jenkins, false, false, nil, nil, nil)
+    reconciler := New(nil, nil, nil, jenkins, false, false, kubernetes.Clientset{}, nil, nil)

    got := reconciler.compareVolumes(pod)

@@ -281,7 +283,7 @@ func TestCompareVolumes(t *testing.T) {
            Volumes: append(resources.GetJenkinsMasterPodBaseVolumes(jenkins), corev1.Volume{Name: "added"}),
        },
    }
-    reconciler := New(nil, nil, nil, jenkins, false, false, nil, nil, nil)
+    reconciler := New(nil, nil, nil, jenkins, false, false, kubernetes.Clientset{}, nil, nil)

    got := reconciler.compareVolumes(pod)
@@ -296,7 +298,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    jenkins := &v1alpha2.Jenkins{}
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{},

@@ -322,7 +326,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -364,7 +370,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -399,7 +407,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -434,7 +444,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -469,7 +481,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -504,7 +518,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{

@@ -531,7 +547,9 @@ func TestReconcileJenkinsBaseConfiguration_verifyPlugins(t *testing.T) {
    }
    r := ReconcileJenkinsBaseConfiguration{
        logger: log.Log,
-        jenkins: jenkins,
+        Configuration: configuration.Configuration{
+            Jenkins: jenkins,
+        },
    }
    pluginsInJenkins := &gojenkins.Plugins{
        Raw: &gojenkins.PluginResponse{
@@ -17,11 +17,11 @@ func (r *ReconcileJenkinsBaseConfiguration) createResource(obj metav1.Object) er
    }

    // Set Jenkins instance as the owner and controller
-    if err := controllerutil.SetControllerReference(r.jenkins, obj, r.scheme); err != nil {
+    if err := controllerutil.SetControllerReference(r.Configuration.Jenkins, obj, r.scheme); err != nil {
        return stackerr.WithStack(err)
    }

-    return r.k8sClient.Create(context.TODO(), runtimeObj) // don't wrap error
+    return r.Client.Create(context.TODO(), runtimeObj) // don't wrap error
}

func (r *ReconcileJenkinsBaseConfiguration) updateResource(obj metav1.Object) error {

@@ -31,9 +31,9 @@ func (r *ReconcileJenkinsBaseConfiguration) updateResource(obj metav1.Object) er
    }

    // set Jenkins instance as the owner and controller, don't check error(can be already set)
-    _ = controllerutil.SetControllerReference(r.jenkins, obj, r.scheme)
+    _ = controllerutil.SetControllerReference(r.Configuration.Jenkins, obj, r.scheme)

-    return r.k8sClient.Update(context.TODO(), runtimeObj) // don't wrap error
+    return r.Client.Update(context.TODO(), runtimeObj) // don't wrap error
}

func (r *ReconcileJenkinsBaseConfiguration) createOrUpdateResource(obj metav1.Object) error {

@@ -43,9 +43,9 @@ func (r *ReconcileJenkinsBaseConfiguration) createOrUpdateResource(obj metav1.Ob
    }

    // set Jenkins instance as the owner and controller, don't check error(can be already set)
-    _ = controllerutil.SetControllerReference(r.jenkins, obj, r.scheme)
+    _ = controllerutil.SetControllerReference(r.Configuration.Jenkins, obj, r.scheme)

-    err := r.k8sClient.Create(context.TODO(), runtimeObj)
+    err := r.Client.Create(context.TODO(), runtimeObj)
    if err != nil && errors.IsAlreadyExists(err) {
        return r.updateResource(obj)
    } else if err != nil && !errors.IsAlreadyExists(err) {
@@ -51,12 +51,12 @@ func (r *ReconcileJenkinsBaseConfiguration) Validate(jenkins *v1alpha2.Jenkins)
        messages = append(messages, msg...)
    }

-    if msg, err := r.validateCustomization(r.jenkins.Spec.GroovyScripts.Customization, "spec.groovyScripts"); err != nil {
+    if msg, err := r.validateCustomization(r.Configuration.Jenkins.Spec.GroovyScripts.Customization, "spec.groovyScripts"); err != nil {
        return nil, err
    } else if len(msg) > 0 {
        messages = append(messages, msg...)
    }
-    if msg, err := r.validateCustomization(r.jenkins.Spec.ConfigurationAsCode.Customization, "spec.configurationAsCode"); err != nil {
+    if msg, err := r.validateCustomization(r.Configuration.Jenkins.Spec.ConfigurationAsCode.Customization, "spec.configurationAsCode"); err != nil {
        return nil, err
    } else if len(msg) > 0 {
        messages = append(messages, msg...)

@@ -67,7 +67,7 @@ func (r *ReconcileJenkinsBaseConfiguration) Validate(jenkins *v1alpha2.Jenkins)

func (r *ReconcileJenkinsBaseConfiguration) validateImagePullSecrets() ([]string, error) {
    var messages []string
-    for _, sr := range r.jenkins.Spec.Master.ImagePullSecrets {
+    for _, sr := range r.Configuration.Jenkins.Spec.Master.ImagePullSecrets {
        msg, err := r.validateImagePullSecret(sr.Name)
        if err != nil {
            return nil, err

@@ -82,7 +82,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateImagePullSecrets() ([]string
func (r *ReconcileJenkinsBaseConfiguration) validateImagePullSecret(secretName string) ([]string, error) {
    var messages []string
    secret := &corev1.Secret{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: secretName, Namespace: r.jenkins.ObjectMeta.Namespace}, secret)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: secretName, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, secret)
    if err != nil && apierrors.IsNotFound(err) {
        messages = append(messages, fmt.Sprintf("Secret %s not found defined in spec.master.imagePullSecrets", secretName))
    } else if err != nil && !apierrors.IsNotFound(err) {

@@ -107,7 +107,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateImagePullSecret(secretName s

func (r *ReconcileJenkinsBaseConfiguration) validateVolumes() ([]string, error) {
    var messages []string
-    for _, volume := range r.jenkins.Spec.Master.Volumes {
+    for _, volume := range r.Configuration.Jenkins.Spec.Master.Volumes {
        switch {
        case volume.ConfigMap != nil:
            if msg, err := r.validateConfigMapVolume(volume); err != nil {

@@ -139,7 +139,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validatePersistentVolumeClaim(volume
    var messages []string

    pvc := &corev1.PersistentVolumeClaim{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: volume.PersistentVolumeClaim.ClaimName, Namespace: r.jenkins.ObjectMeta.Namespace}, pvc)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: volume.PersistentVolumeClaim.ClaimName, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, pvc)
    if err != nil && apierrors.IsNotFound(err) {
        messages = append(messages, fmt.Sprintf("PersistentVolumeClaim '%s' not found for volume '%v'", volume.PersistentVolumeClaim.ClaimName, volume))
    } else if err != nil && !apierrors.IsNotFound(err) {

@@ -156,7 +156,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateConfigMapVolume(volume corev
    }

    configMap := &corev1.ConfigMap{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: volume.ConfigMap.Name, Namespace: r.jenkins.ObjectMeta.Namespace}, configMap)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: volume.ConfigMap.Name, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, configMap)
    if err != nil && apierrors.IsNotFound(err) {
        messages = append(messages, fmt.Sprintf("ConfigMap '%s' not found for volume '%v'", volume.ConfigMap.Name, volume))
    } else if err != nil && !apierrors.IsNotFound(err) {

@@ -173,7 +173,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateSecretVolume(volume corev1.V
    }

    secret := &corev1.Secret{}
-    err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: volume.Secret.SecretName, Namespace: r.jenkins.ObjectMeta.Namespace}, secret)
+    err := r.Client.Get(context.TODO(), types.NamespacedName{Name: volume.Secret.SecretName, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, secret)
    if err != nil && apierrors.IsNotFound(err) {
        messages = append(messages, fmt.Sprintf("Secret '%s' not found for volume '%v'", volume.Secret.SecretName, volume))
    } else if err != nil && !apierrors.IsNotFound(err) {

@@ -186,8 +186,8 @@ func (r *ReconcileJenkinsBaseConfiguration) validateSecretVolume(volume corev1.V
func (r *ReconcileJenkinsBaseConfiguration) validateReservedVolumes() []string {
    var messages []string

-    for _, baseVolume := range resources.GetJenkinsMasterPodBaseVolumes(r.jenkins) {
-        for _, volume := range r.jenkins.Spec.Master.Volumes {
+    for _, baseVolume := range resources.GetJenkinsMasterPodBaseVolumes(r.Configuration.Jenkins) {
+        for _, volume := range r.Configuration.Jenkins.Spec.Master.Volumes {
            if baseVolume.Name == volume.Name {
                messages = append(messages, fmt.Sprintf("Jenkins Master pod volume '%s' is reserved please choose different one", volume.Name))
            }

@@ -220,7 +220,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateContainer(container v1alpha2

func (r *ReconcileJenkinsBaseConfiguration) validateContainerVolumeMounts(container v1alpha2.Container) []string {
    var messages []string
-    allVolumes := append(resources.GetJenkinsMasterPodBaseVolumes(r.jenkins), r.jenkins.Spec.Master.Volumes...)
+    allVolumes := append(resources.GetJenkinsMasterPodBaseVolumes(r.Configuration.Jenkins), r.Configuration.Jenkins.Spec.Master.Volumes...)

    for _, volumeMount := range container.VolumeMounts {
        if len(volumeMount.MountPath) == 0 {

@@ -244,14 +244,14 @@ func (r *ReconcileJenkinsBaseConfiguration) validateContainerVolumeMounts(contai

func (r *ReconcileJenkinsBaseConfiguration) validateJenkinsMasterPodEnvs() []string {
    var messages []string
-    baseEnvs := resources.GetJenkinsMasterContainerBaseEnvs(r.jenkins)
+    baseEnvs := resources.GetJenkinsMasterContainerBaseEnvs(r.Configuration.Jenkins)
    baseEnvNames := map[string]string{}
    for _, env := range baseEnvs {
        baseEnvNames[env.Name] = env.Value
    }

    javaOpts := corev1.EnvVar{}
-    for _, userEnv := range r.jenkins.Spec.Master.Containers[0].Env {
+    for _, userEnv := range r.Configuration.Jenkins.Spec.Master.Containers[0].Env {
        if userEnv.Name == constants.JavaOpsVariableName {
            javaOpts = userEnv
        }

@@ -348,7 +348,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateCustomization(customization

    if len(customization.Secret.Name) > 0 {
        secret := &corev1.Secret{}
-        err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: customization.Secret.Name, Namespace: r.jenkins.ObjectMeta.Namespace}, secret)
+        err := r.Client.Get(context.TODO(), types.NamespacedName{Name: customization.Secret.Name, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, secret)
        if err != nil && apierrors.IsNotFound(err) {
            messages = append(messages, fmt.Sprintf("Secret '%s' configured in %s.secret.name not found", customization.Secret.Name, name))
        } else if err != nil && !apierrors.IsNotFound(err) {

@@ -363,7 +363,7 @@ func (r *ReconcileJenkinsBaseConfiguration) validateCustomization(customization
        }

        configMap := &corev1.ConfigMap{}
-        err := r.k8sClient.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: r.jenkins.ObjectMeta.Namespace}, configMap)
+        err := r.Client.Get(context.TODO(), types.NamespacedName{Name: configMapRef.Name, Namespace: r.Configuration.Jenkins.ObjectMeta.Namespace}, configMap)
        if err != nil && apierrors.IsNotFound(err) {
            messages = append(messages, fmt.Sprintf("ConfigMap '%s' configured in %s.configurations[%d] not found", configMapRef.Name, name, index))
        } else if err != nil && !apierrors.IsNotFound(err) {
@ -15,6 +15,7 @@ import (
|
|||
"k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
|
||||
)
|
||||
|
|
@ -22,7 +23,7 @@ import (
|
|||
func TestValidatePlugins(t *testing.T) {
|
||||
log.SetupLogger(true)
|
||||
baseReconcileLoop := New(nil, nil, log.Log,
|
||||
nil, false, false, nil, nil, nil)
|
||||
nil, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
var requiredBasePlugins []plugins.Plugin
|
||||
var basePlugins []v1alpha2.Plugin
|
||||
|
|
@ -163,7 +164,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
assert.NoError(t, err)
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateImagePullSecrets()
|
||||
assert.Nil(t, got)
|
||||
|
|
@ -184,7 +185,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
fakeClient := fake.NewFakeClient()
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, _ := baseReconcileLoop.validateImagePullSecrets()
|
||||
|
||||
|
|
@ -218,7 +219,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
assert.NoError(t, err)
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, _ := baseReconcileLoop.validateImagePullSecrets()
|
||||
|
||||
|
|
@ -252,7 +253,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
assert.NoError(t, err)
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, _ := baseReconcileLoop.validateImagePullSecrets()
|
||||
|
||||
|
|
@ -286,7 +287,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
assert.NoError(t, err)
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, _ := baseReconcileLoop.validateImagePullSecrets()
|
||||
|
||||
|
|
@ -320,7 +321,7 @@ func TestReconcileJenkinsBaseConfiguration_validateImagePullSecrets(t *testing.T
|
|||
assert.NoError(t, err)
|
||||
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, _ := baseReconcileLoop.validateImagePullSecrets()
|
||||
|
||||
|
|
@ -352,7 +353,7 @@ func TestValidateJenkinsMasterPodEnvs(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateJenkinsMasterPodEnvs()
|
||||
assert.Nil(t, got)
|
||||
})
|
||||
|
|
@ -378,7 +379,7 @@ func TestValidateJenkinsMasterPodEnvs(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateJenkinsMasterPodEnvs()
|
||||
|
||||
assert.Equal(t, got, []string{"Jenkins Master container env 'JENKINS_HOME' cannot be overridden"})
|
||||
|
|
@ -401,7 +402,7 @@ func TestValidateJenkinsMasterPodEnvs(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateJenkinsMasterPodEnvs()
|
||||
|
||||
assert.Equal(t, got, []string{"Jenkins Master container env 'JAVA_OPTS' doesn't have required flag '-Djava.awt.headless=true'"})
|
||||
|
|
@ -424,7 +425,7 @@ func TestValidateJenkinsMasterPodEnvs(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateJenkinsMasterPodEnvs()
|
||||
|
||||
assert.Equal(t, got, []string{"Jenkins Master container env 'JAVA_OPTS' doesn't have required flag '-Djenkins.install.runSetupWizard=false'"})
|
||||
|
|
@ -445,7 +446,7 @@ func TestValidateReservedVolumes(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateReservedVolumes()
|
||||
assert.Nil(t, got)
|
||||
})
|
||||
|
|
@ -462,7 +463,7 @@ func TestValidateReservedVolumes(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateReservedVolumes()
|
||||
|
||||
assert.Equal(t, got, []string{"Jenkins Master pod volume 'jenkins-home' is reserved please choose different one"})
|
||||
|
|
@ -477,7 +478,7 @@ func TestValidateContainerVolumeMounts(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateContainerVolumeMounts(v1alpha2.Container{})
|
||||
assert.Nil(t, got)
|
||||
})
|
||||
|
|
@ -504,7 +505,7 @@ func TestValidateContainerVolumeMounts(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateContainerVolumeMounts(jenkins.Spec.Master.Containers[0])
|
||||
assert.Nil(t, got)
|
||||
})
|
||||
|
|
@ -531,7 +532,7 @@ func TestValidateContainerVolumeMounts(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateContainerVolumeMounts(jenkins.Spec.Master.Containers[0])
|
||||
assert.Equal(t, got, []string{"mountPath not set for 'example' volume mount in container ''"})
|
||||
})
|
||||
|
|
@ -553,7 +554,7 @@ func TestValidateContainerVolumeMounts(t *testing.T) {
|
|||
},
|
||||
}
|
||||
baseReconcileLoop := New(nil, nil, logf.ZapLogger(false),
|
||||
&jenkins, false, false, nil, nil, nil)
|
||||
&jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got := baseReconcileLoop.validateContainerVolumeMounts(jenkins.Spec.Master.Containers[0])
|
||||
|
||||
assert.Equal(t, got, []string{"Not found volume for 'missing-volume' volume mount in container ''"})
|
||||
|
|
@ -574,7 +575,7 @@ func TestValidateConfigMapVolume(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
nil, false, false, nil, nil, nil)
|
||||
nil, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateConfigMapVolume(volume)
|
||||
|
||||
|
|
@ -600,7 +601,7 @@ func TestValidateConfigMapVolume(t *testing.T) {
|
|||
err := fakeClient.Create(context.TODO(), &configMap)
|
||||
assert.NoError(t, err)
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateConfigMapVolume(volume)
|
||||
|
||||
|
|
@ -624,7 +625,7 @@ func TestValidateConfigMapVolume(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateConfigMapVolume(volume)
|
||||
|
||||
|
|
@ -648,7 +649,7 @@ func TestValidateSecretVolume(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
nil, false, false, nil, nil, nil)
|
||||
nil, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateSecretVolume(volume)
|
||||
|
||||
|
|
@ -672,7 +673,7 @@ func TestValidateSecretVolume(t *testing.T) {
|
|||
err := fakeClient.Create(context.TODO(), &secret)
|
||||
assert.NoError(t, err)
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateSecretVolume(volume)
|
||||
|
||||
|
|
@ -694,8 +695,7 @@ func TestValidateSecretVolume(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
got, err := baseReconcileLoop.validateSecretVolume(volume)
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -717,7 +717,7 @@ func TestValidateCustomization(t *testing.T) {
|
|||
customization := v1alpha2.Customization{}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
|
||||
got, err := baseReconcileLoop.validateCustomization(customization, "spec.groovyScripts")
|
||||
|
||||
|
|
@ -737,7 +737,7 @@ func TestValidateCustomization(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
err := fakeClient.Create(context.TODO(), secret)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
@ -766,7 +766,7 @@ func TestValidateCustomization(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
err := fakeClient.Create(context.TODO(), secret)
|
||||
require.NoError(t, err)
|
||||
err = fakeClient.Create(context.TODO(), configMap)
|
||||
|
|
@ -791,7 +791,7 @@ func TestValidateCustomization(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
err := fakeClient.Create(context.TODO(), configMap)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
@ -814,7 +814,7 @@ func TestValidateCustomization(t *testing.T) {
|
|||
}
|
||||
fakeClient := fake.NewFakeClient()
|
||||
baseReconcileLoop := New(fakeClient, nil, logf.ZapLogger(false),
|
||||
jenkins, false, false, nil, nil, nil)
|
||||
jenkins, false, false, kubernetes.Clientset{}, nil, nil)
|
||||
err := fakeClient.Create(context.TODO(), secret)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,52 @@
package configuration

import (
"context"
"fmt"

"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"

stackerr "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// Configuration holds values required for Jenkins configuration
type Configuration struct {
Client client.Client
ClientSet kubernetes.Clientset
Notifications *chan notifications.Event
Jenkins *v1alpha2.Jenkins
}

// RestartJenkinsMasterPod terminates the Jenkins master pod and notifies about it
func (c *Configuration) RestartJenkinsMasterPod() error {
currentJenkinsMasterPod, err := c.getJenkinsMasterPod()
if err != nil {
return err
}

*c.Notifications <- notifications.Event{
Jenkins: *c.Jenkins,
Phase: notifications.PhaseBase,
LogLevel: v1alpha2.NotificationLogLevelInfo,
Message: fmt.Sprintf("Terminating Jenkins Master Pod %s/%s.", currentJenkinsMasterPod.Namespace, currentJenkinsMasterPod.Name),
MessagesVerbose: []string{},
}

return stackerr.WithStack(c.Client.Delete(context.TODO(), currentJenkinsMasterPod))
}

func (c *Configuration) getJenkinsMasterPod() (*corev1.Pod, error) {
jenkinsMasterPodName := resources.GetJenkinsMasterPodName(*c.Jenkins)
currentJenkinsMasterPod := &corev1.Pod{}
err := c.Client.Get(context.TODO(), types.NamespacedName{Name: jenkinsMasterPodName, Namespace: c.Jenkins.Namespace}, currentJenkinsMasterPod)
if err != nil {
return nil, err // don't wrap error
}
return currentJenkinsMasterPod, nil
}
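For context, here is a minimal sketch of the other end of the Notifications channel: a consumer that drains the *chan notifications.Event which RestartJenkinsMasterPod sends to before deleting the pod. It is illustrative only; the package name, function name and logging below are assumptions rather than operator code, while the Event field names mirror the literal above.

package notificationsketch

import (
	"fmt"

	"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
	"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"
)

// StartListener drains the channel that Configuration.Notifications points to
// and prints every info-level event, such as the "Terminating Jenkins Master Pod"
// message emitted by RestartJenkinsMasterPod. Hypothetical helper for illustration.
func StartListener(events *chan notifications.Event) {
	go func() {
		for e := range *events {
			if e.LogLevel == v1alpha2.NotificationLogLevelInfo {
				fmt.Printf("phase=%v message=%s\n", e.Phase, e.Message)
			}
		}
	}()
}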
@@ -5,45 +5,48 @@ import (
"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
jenkinsclient "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/backuprestore"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/user/casc"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/user/seedjobs"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/groovy"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"

"github.com/go-logr/logr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
k8s "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ReconcileUserConfiguration defines values required for Jenkins user configuration
type ReconcileUserConfiguration struct {
k8sClient k8s.Client
configuration.Configuration
jenkinsClient jenkinsclient.Jenkins
logger logr.Logger
jenkins *v1alpha2.Jenkins
clientSet kubernetes.Clientset
config rest.Config
}

// New creates a structure which takes care of user configuration
func New(k8sClient k8s.Client, jenkinsClient jenkinsclient.Jenkins, logger logr.Logger,
jenkins *v1alpha2.Jenkins, clientSet kubernetes.Clientset, config rest.Config) *ReconcileUserConfiguration {
func New(client client.Client, jenkinsClient jenkinsclient.Jenkins, logger logr.Logger,
jenkins *v1alpha2.Jenkins, clientSet kubernetes.Clientset, config rest.Config, notificationEvents *chan notifications.Event) *ReconcileUserConfiguration {
return &ReconcileUserConfiguration{
k8sClient: k8sClient,
Configuration: configuration.Configuration{
Client: client,
ClientSet: clientSet,
Notifications: notificationEvents,
Jenkins: jenkins,
},
jenkinsClient: jenkinsClient,
logger: logger,
jenkins: jenkins,
clientSet: clientSet,
config: config,
}
}

// Reconcile is the main reconciliation loop for user-supplied configuration
func (r *ReconcileUserConfiguration) Reconcile() (reconcile.Result, error) {
backupAndRestore := backuprestore.New(r.k8sClient, r.clientSet, r.logger, r.jenkins, r.config)
backupAndRestore := backuprestore.New(r.Client, r.ClientSet, r.logger, r.Configuration.Jenkins, r.config)

result, err := r.ensureSeedJobs()
if err != nil {
@@ -76,8 +79,8 @@ func (r *ReconcileUserConfiguration) Reconcile() (reconcile.Result, error) {
}

func (r *ReconcileUserConfiguration) ensureSeedJobs() (reconcile.Result, error) {
seedJobs := seedjobs.New(r.jenkinsClient, r.k8sClient, r.logger)
done, err := seedJobs.EnsureSeedJobs(r.jenkins)
seedJobs := seedjobs.New(r.jenkinsClient, r.Configuration, r.logger)
done, err := seedJobs.EnsureSeedJobs(r.Configuration.Jenkins)
if err != nil {
return reconcile.Result{}, err
}
@@ -88,7 +91,7 @@ func (r *ReconcileUserConfiguration) ensureSeedJobs() (reconcile.Result, error)
}

func (r *ReconcileUserConfiguration) ensureUserConfiguration(jenkinsClient jenkinsclient.Jenkins) (reconcile.Result, error) {
groovyClient := groovy.New(jenkinsClient, r.k8sClient, r.logger, r.jenkins, "user-groovy", r.jenkins.Spec.GroovyScripts.Customization)
groovyClient := groovy.New(jenkinsClient, r.Client, r.logger, r.Configuration.Jenkins, "user-groovy", r.Configuration.Jenkins.Spec.GroovyScripts.Customization)

requeue, err := groovyClient.WaitForSecretSynchronization(resources.GroovyScriptsSecretVolumePath)
if err != nil {
@@ -107,8 +110,8 @@ func (r *ReconcileUserConfiguration) ensureUserConfiguration(jenkinsClient jenki
return reconcile.Result{Requeue: true}, nil
}

configurationAsCodeClient := casc.New(jenkinsClient, r.k8sClient, r.logger, r.jenkins)
requeue, err = configurationAsCodeClient.Ensure(r.jenkins)
configurationAsCodeClient := casc.New(jenkinsClient, r.Client, r.logger, r.Configuration.Jenkins)
requeue, err = configurationAsCodeClient.Ensure(r.Configuration.Jenkins)
if err != nil {
return reconcile.Result{}, err
}
@@ -11,6 +11,7 @@ import (
"github.com/jenkinsci/kubernetes-operator/internal/render"
"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
jenkinsclient "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/constants"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/groovy"
@@ -24,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
k8s "sigs.k8s.io/controller-runtime/pkg/client"
)

const (
@@ -131,16 +131,16 @@ jenkins.getQueue().schedule(jobRef)

// SeedJobs defines API for configuring and ensuring Jenkins Seed Jobs and Deploy Keys
type SeedJobs struct {
configuration.Configuration
jenkinsClient jenkinsclient.Jenkins
k8sClient k8s.Client
logger logr.Logger
}

// New creates SeedJobs object
func New(jenkinsClient jenkinsclient.Jenkins, k8sClient k8s.Client, logger logr.Logger) *SeedJobs {
func New(jenkinsClient jenkinsclient.Jenkins, config configuration.Configuration, logger logr.Logger) *SeedJobs {
return &SeedJobs{
Configuration: config,
jenkinsClient: jenkinsClient,
k8sClient: k8sClient,
logger: logger,
}
}
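An aside on the embedding above: because SeedJobs now embeds configuration.Configuration as an anonymous field, its fields and methods are promoted, which is why the hunks below can call s.Client and s.RestartJenkinsMasterPod directly. A self-contained sketch of the same mechanism, using hypothetical names rather than operator types:

package main

import "fmt"

// Base stands in for configuration.Configuration in this sketch.
type Base struct {
	Name string
}

// Restart stands in for RestartJenkinsMasterPod.
func (b *Base) Restart() error {
	fmt.Println("restarting", b.Name)
	return nil
}

// Worker stands in for SeedJobs: it embeds Base anonymously.
type Worker struct {
	Base
}

func main() {
	w := Worker{Base: Base{Name: "jenkins-master"}}
	// Promoted field and method: no explicit w.Base prefix is required.
	fmt.Println(w.Name)
	_ = w.Restart()
}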
@@ -149,16 +149,16 @@ func New(jenkinsClient jenkinsclient.Jenkins, k8sClient k8s.Client, logger logr.
func (s *SeedJobs) EnsureSeedJobs(jenkins *v1alpha2.Jenkins) (done bool, err error) {
if s.isRecreatePodNeeded(*jenkins) {
s.logger.Info("Some seed job has been deleted, recreating pod")
return false, s.restartJenkinsMasterPod(*jenkins)
return false, s.RestartJenkinsMasterPod()
}

if len(jenkins.Spec.SeedJobs) > 0 {
err := s.createAgent(s.jenkinsClient, s.k8sClient, jenkins, jenkins.Namespace, AgentName)
err := s.createAgent(s.jenkinsClient, s.Client, jenkins, jenkins.Namespace, AgentName)
if err != nil {
return false, err
}
} else if len(jenkins.Spec.SeedJobs) == 0 {
err := s.k8sClient.Delete(context.TODO(), &appsv1.Deployment{
err := s.Client.Delete(context.TODO(), &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: jenkins.Namespace,
Name: agentDeploymentName(*jenkins, AgentName),
@@ -185,7 +185,7 @@ func (s *SeedJobs) EnsureSeedJobs(jenkins *v1alpha2.Jenkins) (done bool, err err
seedJobIDs := s.getAllSeedJobIDs(*jenkins)
if done && !reflect.DeepEqual(seedJobIDs, jenkins.Status.CreatedSeedJobs) {
jenkins.Status.CreatedSeedJobs = seedJobIDs
return false, stackerr.WithStack(s.k8sClient.Update(context.TODO(), jenkins))
return false, stackerr.WithStack(s.Client.Update(context.TODO(), jenkins))
}

return true, nil
@@ -193,7 +193,7 @@ func (s *SeedJobs) EnsureSeedJobs(jenkins *v1alpha2.Jenkins) (done bool, err err

// createJob is responsible for creating jenkins job which configures jenkins seed jobs and deploy keys
func (s *SeedJobs) createJobs(jenkins *v1alpha2.Jenkins) (requeue bool, err error) {
groovyClient := groovy.New(s.jenkinsClient, s.k8sClient, s.logger, jenkins, "seed-jobs", jenkins.Spec.GroovyScripts.Customization)
groovyClient := groovy.New(s.jenkinsClient, s.Client, s.logger, jenkins, "seed-jobs", jenkins.Spec.GroovyScripts.Customization)
for _, seedJob := range jenkins.Spec.SeedJobs {
credentialValue, err := s.credentialValue(jenkins.Namespace, seedJob)
if err != nil {
@@ -232,14 +232,14 @@ func (s *SeedJobs) ensureLabelsForSecrets(jenkins v1alpha2.Jenkins) error {
secret := &corev1.Secret{}
namespaceName := types.NamespacedName{Namespace: jenkins.ObjectMeta.Namespace, Name: seedJob.CredentialID}
err := s.k8sClient.Get(context.TODO(), namespaceName, secret)
err := s.Client.Get(context.TODO(), namespaceName, secret)
if err != nil {
return stackerr.WithStack(err)
}

if !resources.VerifyIfLabelsAreSet(secret, requiredLabels) {
secret.ObjectMeta.Labels = requiredLabels
if err = s.k8sClient.Update(context.TODO(), secret); err != nil {
if err = s.Client.Update(context.TODO(), secret); err != nil {
return stackerr.WithStack(err)
}
}
@@ -253,7 +253,7 @@ func (s *SeedJobs) credentialValue(namespace string, seedJob v1alpha2.SeedJob) (
if seedJob.JenkinsCredentialType == v1alpha2.BasicSSHCredentialType || seedJob.JenkinsCredentialType == v1alpha2.UsernamePasswordCredentialType {
secret := &corev1.Secret{}
namespaceName := types.NamespacedName{Namespace: namespace, Name: seedJob.CredentialID}
err := s.k8sClient.Get(context.TODO(), namespaceName, secret)
err := s.Client.Get(context.TODO(), namespaceName, secret)
if err != nil {
return "", stackerr.WithStack(err)
}
@@ -274,27 +274,6 @@ func (s *SeedJobs) getAllSeedJobIDs(jenkins v1alpha2.Jenkins) []string {
return ids
}

//TODO move to k8sClient
func (s *SeedJobs) getJenkinsMasterPod(jenkins v1alpha2.Jenkins) (*corev1.Pod, error) {
jenkinsMasterPodName := resources.GetJenkinsMasterPodName(jenkins)
currentJenkinsMasterPod := &corev1.Pod{}
err := s.k8sClient.Get(context.TODO(), types.NamespacedName{Name: jenkinsMasterPodName, Namespace: jenkins.Namespace}, currentJenkinsMasterPod)
if err != nil {
return nil, err // don't wrap error
}
return currentJenkinsMasterPod, nil
}

//TODO move to k8sClient
func (s *SeedJobs) restartJenkinsMasterPod(jenkins v1alpha2.Jenkins) error {
currentJenkinsMasterPod, err := s.getJenkinsMasterPod(jenkins)
if err != nil {
return err
}
s.logger.Info(fmt.Sprintf("Terminating Jenkins Master Pod %s/%s", currentJenkinsMasterPod.Namespace, currentJenkinsMasterPod.Name))
return stackerr.WithStack(s.k8sClient.Delete(context.TODO(), currentJenkinsMasterPod))
}

func (s *SeedJobs) isRecreatePodNeeded(jenkins v1alpha2.Jenkins) bool {
for _, createdSeedJob := range jenkins.Status.CreatedSeedJobs {
found := false
@@ -7,6 +7,7 @@ import (
"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
jenkinsclient "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/client"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration/base/resources"

"github.com/bndr/gojenkins"
@@ -17,6 +18,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
@@ -86,6 +88,12 @@ func TestEnsureSeedJobs(t *testing.T) {
},
}

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobCreatingScript, err := seedJobCreatingGroovyScript(jenkins.Spec.SeedJobs[0])
assert.NoError(t, err)
@@ -94,7 +102,7 @@ func TestEnsureSeedJobs(t *testing.T) {
jenkinsClient.EXPECT().GetNodeSecret(AgentName).Return(agentSecret, nil).AnyTimes()
jenkinsClient.EXPECT().ExecuteScript(seedJobCreatingScript).AnyTimes()

seedJobClient := New(jenkinsClient, fakeClient, logger)
seedJobClient := New(jenkinsClient, config, logger)

// when
_, err = seedJobClient.EnsureSeedJobs(jenkins)
@@ -109,6 +117,7 @@ func TestEnsureSeedJobs(t *testing.T) {
t.Run("delete agent deployment when no seed jobs", func(t *testing.T) {
// given
logger := logf.ZapLogger(false)
ctrl := gomock.NewController(t)
ctx := context.TODO()
defer ctrl.Finish()
@@ -122,11 +131,17 @@ func TestEnsureSeedJobs(t *testing.T) {
err := v1alpha2.SchemeBuilder.AddToScheme(scheme.Scheme)
assert.NoError(t, err)

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

jenkinsClient.EXPECT().GetNode(AgentName).AnyTimes()
jenkinsClient.EXPECT().CreateNode(AgentName, 1, "The jenkins-operator generated agent", "/home/jenkins", AgentName).AnyTimes()
jenkinsClient.EXPECT().GetNodeSecret(AgentName).Return(agentSecret, nil).AnyTimes()

seedJobsClient := New(jenkinsClient, fakeClient, nil)
seedJobsClient := New(jenkinsClient, config, logger)

err = fakeClient.Create(ctx, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
@@ -152,6 +167,7 @@ func TestEnsureSeedJobs(t *testing.T) {
func TestCreateAgent(t *testing.T) {
t.Run("don't fail when deployment is already created", func(t *testing.T) {
// given
logger := logf.ZapLogger(false)
ctrl := gomock.NewController(t)
ctx := context.TODO()
defer ctrl.Finish()
@@ -168,7 +184,13 @@ func TestCreateAgent(t *testing.T) {
jenkinsClient.EXPECT().CreateNode(AgentName, 1, "The jenkins-operator generated agent", "/home/jenkins", AgentName).AnyTimes()
jenkinsClient.EXPECT().GetNodeSecret(AgentName).Return(agentSecret, nil).AnyTimes()

seedJobsClient := New(jenkinsClient, fakeClient, nil)
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobsClient := New(jenkinsClient, config, logger)

err = fakeClient.Create(ctx, &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
@@ -187,7 +209,12 @@ func TestCreateAgent(t *testing.T) {
}

func TestSeedJobs_isRecreatePodNeeded(t *testing.T) {
seedJobsClient := New(nil, nil, nil)
config := configuration.Configuration{
Client: nil,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}
seedJobsClient := New(nil, config, nil)
t.Run("empty", func(t *testing.T) {
jenkins := v1alpha2.Jenkins{}
@@ -57,7 +57,7 @@ func (s *SeedJobs) ValidateSeedJobs(jenkins v1alpha2.Jenkins) ([]string, error)
if seedJob.JenkinsCredentialType == v1alpha2.BasicSSHCredentialType || seedJob.JenkinsCredentialType == v1alpha2.UsernamePasswordCredentialType {
secret := &v1.Secret{}
namespaceName := types.NamespacedName{Namespace: jenkins.Namespace, Name: seedJob.CredentialID}
err := s.k8sClient.Get(context.TODO(), namespaceName, secret)
err := s.Client.Get(context.TODO(), namespaceName, secret)
if err != nil && apierrors.IsNotFound(err) {
messages = append(messages, fmt.Sprintf("seedJob `%s` required secret '%s' with Jenkins credential not found", seedJob.ID, seedJob.CredentialID))
} else if err != nil {
@@ -5,10 +5,12 @@ import (
"testing"

"github.com/jenkinsci/kubernetes-operator/pkg/apis/jenkins/v1alpha2"
"github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/configuration"

"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
@@ -72,7 +74,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -92,7 +102,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -126,7 +144,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -159,7 +183,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -193,7 +223,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -216,7 +252,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -237,7 +281,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -258,7 +310,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -279,7 +339,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -313,7 +381,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -346,7 +420,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -380,7 +460,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -413,7 +499,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -446,7 +538,13 @@ func TestValidateSeedJobs(t *testing.T) {
err := fakeClient.Create(context.TODO(), secret)
assert.NoError(t, err)

seedJobs := New(nil, fakeClient, logf.ZapLogger(false))
config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -470,7 +568,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -495,7 +601,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -518,7 +632,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -547,7 +669,15 @@ func TestValidateSeedJobs(t *testing.T) {
},
}

seedJobs := New(nil, fake.NewFakeClient(), logf.ZapLogger(false))
fakeClient := fake.NewFakeClient()

config := configuration.Configuration{
Client: fakeClient,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

seedJobs := New(nil, config, logf.ZapLogger(false))
result, err := seedJobs.ValidateSeedJobs(jenkins)

assert.NoError(t, err)
@@ -560,7 +690,14 @@ func TestValidateIfIDIsUnique(t *testing.T) {
seedJobs := []v1alpha2.SeedJob{
{ID: "first"}, {ID: "second"},
}
ctrl := New(nil, nil, logf.ZapLogger(false))

config := configuration.Configuration{
Client: nil,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

ctrl := New(nil, config, logf.ZapLogger(false))
got := ctrl.validateIfIDIsUnique(seedJobs)
assert.Nil(t, got)
})
@@ -568,7 +705,14 @@ func TestValidateIfIDIsUnique(t *testing.T) {
seedJobs := []v1alpha2.SeedJob{
{ID: "first"}, {ID: "first"},
}
ctrl := New(nil, nil, logf.ZapLogger(false))

config := configuration.Configuration{
Client: nil,
ClientSet: kubernetes.Clientset{},
Notifications: nil,
}

ctrl := New(nil, config, logf.ZapLogger(false))
got := ctrl.validateIfIDIsUnique(seedJobs)

assert.Equal(t, got, []string{"'first' seed job ID is not unique"})
@@ -8,11 +8,11 @@ import (
// Validate validates Jenkins CR Spec section
func (r *ReconcileUserConfiguration) Validate(jenkins *v1alpha2.Jenkins) ([]string, error) {
backupAndRestore := backuprestore.New(r.k8sClient, r.clientSet, r.logger, r.jenkins, r.config)
backupAndRestore := backuprestore.New(r.Client, r.ClientSet, r.logger, r.Configuration.Jenkins, r.config)
if msg := backupAndRestore.Validate(); msg != nil {
return msg, nil
}

seedJobs := seedjobs.New(r.jenkinsClient, r.k8sClient, r.logger)
seedJobs := seedjobs.New(r.jenkinsClient, r.Configuration, r.logger)
return seedJobs.ValidateSeedJobs(*jenkins)
}
@@ -202,7 +202,7 @@ func (r *ReconcileJenkins) reconcile(request reconcile.Request, logger logr.Logg
return reconcile.Result{}, jenkins, err
}
// Reconcile base configuration
baseConfiguration := base.New(r.client, r.scheme, logger, jenkins, r.local, r.minikube, &r.clientSet, &r.config, r.notificationEvents)
baseConfiguration := base.New(r.client, r.scheme, logger, jenkins, r.local, r.minikube, r.clientSet, &r.config, r.notificationEvents)

messages, err := baseConfiguration.Validate(jenkins)
if err != nil {
@@ -255,7 +255,7 @@ func (r *ReconcileJenkins) reconcile(request reconcile.Request, logger logr.Logg
logger.Info(message)
}
// Reconcile user configuration
userConfiguration := user.New(r.client, jenkinsClient, logger, jenkins, r.clientSet, r.config)
userConfiguration := user.New(r.client, jenkinsClient, logger, jenkins, r.clientSet, r.config, r.notificationEvents)

messages, err = userConfiguration.Validate(jenkins)
if err != nil {
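For completeness, a hypothetical helper (not operator code) showing the shape of the value that both constructor calls above now take as their last argument: a *chan notifications.Event shared by the base and user configuration reconcilers, so a single listener can observe pod-termination events from either phase. The helper name and the buffered channel are assumptions.

package wiring

import "github.com/jenkinsci/kubernetes-operator/pkg/controller/jenkins/notifications"

// NewNotificationEvents builds the shared events channel and returns a pointer
// to it, matching the *chan notifications.Event parameter passed to base.New
// and user.New above.
func NewNotificationEvents(buffer int) *chan notifications.Event {
	events := make(chan notifications.Event, buffer)
	return &events
}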