Mostly cosmetic changes to logs: removed quotes from diff output, moved all object diffs to a text diff, and enabled padding for the log level.
parent 2aeaad03f3
commit 0143a470b1

go.sum | 1 +
go.sum
@@ -438,6 +438,7 @@ k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF
 k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
 k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
 k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
+k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=
 k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
 k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
 k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM=
@@ -371,11 +371,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 	//TODO: improve me
 	if *c.Statefulset.Spec.Replicas != *statefulSet.Spec.Replicas {
 		match = false
-		reasons = append(reasons, "new statefulset's number of replicas doesn't match the current one")
+		reasons = append(reasons, "new statefulset's number of replicas does not match the current one")
 	}
 	if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
 		match = false
-		reasons = append(reasons, "new statefulset's annotations doesn't match the current one")
+		reasons = append(reasons, "new statefulset's annotations does not match the current one")
 	}
 
 	needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
@@ -392,24 +392,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 	if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's serviceAccountName service account name doesn't match the current one")
+		reasons = append(reasons, "new statefulset's serviceAccountName service account name does not match the current one")
 	}
 	if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one")
+		reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds does not match the current one")
 	}
 	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's pod affinity doesn't match the current one")
+		reasons = append(reasons, "new statefulset's pod affinity does not match the current one")
 	}
 
 	// Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta
 	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's metadata labels doesn't match the current one")
+		reasons = append(reasons, "new statefulset's metadata labels does not match the current one")
 	}
 	if (c.Statefulset.Spec.Selector != nil) && (statefulSet.Spec.Selector != nil) {
 		if !reflect.DeepEqual(c.Statefulset.Spec.Selector.MatchLabels, statefulSet.Spec.Selector.MatchLabels) {
@@ -420,7 +420,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 				return &compareStatefulsetResult{}
 			}
 			needsReplace = true
-			reasons = append(reasons, "new statefulset's selector doesn't match the current one")
+			reasons = append(reasons, "new statefulset's selector does not match the current one")
 		}
 	}
 
@@ -434,7 +434,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 		match = false
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's pod template security context in spec doesn't match the current one")
+		reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one")
 	}
 	if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) {
 		needsReplace = true
@@ -445,17 +445,17 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 		// Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta
 		if name != statefulSet.Spec.VolumeClaimTemplates[i].Name {
 			needsReplace = true
-			reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d doesn't match the current one", i))
+			reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
 			continue
 		}
 		if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) {
 			needsReplace = true
-			reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q doesn't match the current one", name))
+			reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name))
 		}
 		if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) {
 			name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
 			needsReplace = true
-			reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q doesn't match the current one", name))
+			reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name))
 		}
 	}
 
@@ -465,14 +465,14 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
 		match = false
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's pod priority class in spec doesn't match the current one")
+		reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one")
 	}
 
 	// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
 	// until they are re-created for other reasons, for example node rotation
 	if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
 		needsReplace = true
-		reasons = append(reasons, "lazy Spilo update: new statefulset's pod image doesn't match the current one")
+		reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one")
 	}
 
 	if needsRollUpdate || needsReplace {
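The hunks above all feed one accumulator: each detected difference flips match off, raises needsReplace and/or needsRollUpdate, and appends one sentence to reasons, which the commit now logs with %s instead of %q. Only three fields of the result type are directly visible in this commit (cmp.match, cmp.rollingUpdate, and cmp.reasons in the syncStatefulSet hunk further down); a sketch of the likely shape, where the replace field name is an assumption inferred from the needsReplace flag:

```go
// Sketch, not the verbatim type from the repository: only match,
// rollingUpdate, and reasons appear in this diff.
type compareStatefulsetResult struct {
	match         bool     // true when current and desired statefulsets agree
	replace       bool     // assumed: statefulset must be deleted and recreated
	rollingUpdate bool     // pods must be restarted to pick up the change
	reasons       []string // one human-readable sentence per difference
}
```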
@@ -582,7 +582,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
 	}
 	if isSmaller {
-		c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
+		c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit)
 		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
 		spec.Resources.ResourceLimits.CPU = minCPULimit
 	}
@@ -595,7 +595,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
 	}
 	if isSmaller {
-		c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
+		c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit)
 		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
 		spec.Resources.ResourceLimits.Memory = minMemoryLimit
 	}
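Both limit checks rely on an isSmaller comparison of Kubernetes quantity strings that is computed outside these hunks. A minimal sketch of how such a comparison can be done with k8s.io/apimachinery/pkg/api/resource; the helper name isSmallerQuantity is hypothetical:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// isSmallerQuantity reports whether value is strictly smaller than minimum.
// Both are Kubernetes resource strings such as "250m" or "256Mi".
func isSmallerQuantity(value, minimum string) (bool, error) {
	v, err := resource.ParseQuantity(value)
	if err != nil {
		return false, fmt.Errorf("could not parse quantity %q: %v", value, err)
	}
	m, err := resource.ParseQuantity(minimum)
	if err != nil {
		return false, fmt.Errorf("could not parse quantity %q: %v", minimum, err)
	}
	return v.Cmp(m) == -1, nil
}

func main() {
	smaller, err := isSmallerQuantity("100m", "250m")
	if err != nil {
		panic(err)
	}
	fmt.Println(smaller) // true: 100m CPU is below the 250m minimum
}
```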
@@ -1157,7 +1157,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	}
 
 	// generate the spilo container
-	c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars)
+	c.logger.Debugf("Generating Spilo container, environment variables")
+	c.logger.Debugf("%v", spiloEnvVars)
+
 	spiloContainer := generateContainer(c.containerName(),
 		&effectiveDockerImage,
 		resourceRequirements,
@@ -2055,7 +2057,8 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
 		envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey})
 	}
 
-	c.logger.Debugf("Generated logical backup env vars %v", envVars)
+	c.logger.Debugf("Generated logical backup env vars")
+	c.logger.Debugf("%v", envVars)
 	return envVars
 }
 
@@ -293,7 +293,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
 
 // setRollingUpdateFlagForStatefulSet sets the indicator of the rolling update requirement
 // in the StatefulSet annotation.
-func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool) {
+func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) {
 	anno := sset.GetAnnotations()
 	if anno == nil {
 		anno = make(map[string]string)
@@ -301,13 +301,13 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool) {
 
 	anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
 	sset.SetAnnotations(anno)
-	c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val)
+	c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg)
 }
 
 // applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet
 // and applies that setting to the actual running cluster.
 func (c *Cluster) applyRollingUpdateFlagforStatefulSet(val bool) error {
-	c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val)
+	c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val, "applyRollingUpdateFlag")
 	sset, err := c.updateStatefulSetAnnotations(c.Statefulset.GetAnnotations())
 	if err != nil {
 		return err
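Assembled from the two hunks above, the modified function now reads roughly as follows; this is a reassembly of the diff lines, not a verbatim copy of the file. The new msg argument exists purely so the debug line can say who requested the flag change:

```go
// setRollingUpdateFlagForStatefulSet sets the indicator of the rolling update
// requirement in the StatefulSet annotation; msg records the caller/reason so
// the debug log explains why the flag changed.
func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) {
	anno := sset.GetAnnotations()
	if anno == nil {
		anno = make(map[string]string)
	}

	anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
	sset.SetAnnotations(anno)
	c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg)
}
```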
@@ -359,14 +359,13 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *appsv1.StatefulSet) bool {
 			podsRollingUpdateRequired = false
 		} else {
 			c.logger.Infof("found a statefulset with an unfinished rolling update of the pods")
-
 		}
 	}
 	return podsRollingUpdateRequired
 }
 
 func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) {
-	c.logger.Debugf("updating statefulset annotations")
+	c.logger.Debugf("patching statefulset annotations")
 	patchData, err := metaAnnotationsPatch(annotations)
 	if err != nil {
 		return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err)
@@ -348,13 +348,13 @@ func (c *Cluster) syncStatefulSet() error {
 	if err != nil {
 		return fmt.Errorf("could not generate statefulset: %v", err)
 	}
-	c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired)
+	c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "from cache")
 
 	cmp := c.compareStatefulSetWith(desiredSS)
 	if !cmp.match {
 		if cmp.rollingUpdate && !podsRollingUpdateRequired {
 			podsRollingUpdateRequired = true
-			c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired)
+			c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "statefulset changes")
 		}
 
 		c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons)
@@ -497,11 +497,11 @@ func (c *Cluster) syncSecrets() error {
 			return fmt.Errorf("could not get current secret: %v", err)
 		}
 		if secretUsername != string(secret.Data["username"]) {
-			c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername)
+			c.logger.Warningf("secret %s does not contain the role %q", secretSpec.Name, secretUsername)
 			continue
 		}
 		c.Secrets[secret.UID] = secret
-		c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta))
+		c.logger.Debugf("secret %s already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta))
 		if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
 			secretUsername = constants.SuperuserKeyName
 			userMap = c.systemUsers
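The commit repeatedly swaps %q for %s in log messages; together with DisableQuote further down, this strips quotation marks from log output. A minimal standalone illustration of what that verb change does:

```go
package main

import "fmt"

func main() {
	// hypothetical cluster name, for illustration only
	name := "acid-minimal-cluster"
	fmt.Printf("secret %q already exists\n", name) // secret "acid-minimal-cluster" already exists
	fmt.Printf("secret %s already exists\n", name) // secret acid-minimal-cluster already exists
}
```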
@@ -168,7 +168,7 @@ func (c *Cluster) logPDBChanges(old, new *policybeta1.PodDisruptionBudget, isUpdate bool, reason string) {
 		)
 	}
 
-	c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
+	logNiceDiff(c.logger, old.Spec, new.Spec)
 }
 
 func logNiceDiff(log *logrus.Entry, old, new interface{}) {
@@ -181,7 +181,8 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) {
 
 	nice := nicediff.Diff(string(o), string(n), true)
 	for _, s := range strings.Split(nice, "\n") {
-		log.Debugf(s)
+		// " is not needed in the value to understand the diff
+		log.Debugf(strings.ReplaceAll(s, "\"", ""))
 	}
 }
 
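The hunk shows only the middle of logNiceDiff. Since o and n feed string(...) and the call sites pass arbitrary objects, the function presumably JSON-marshals both arguments first; a plausible reconstruction under that assumption (the marshalling step and its error handling are inferred, the nicediff import path is not visible in this commit, and the boolean flag is passed through exactly as in the hunk):

```go
// logNiceDiff logs a line-oriented text diff of two API objects, one log
// entry per line, which keeps each line aligned under the padded level prefix.
func logNiceDiff(log *logrus.Entry, old, new interface{}) {
	// inferred: the hunk only shows o and n being consumed, not produced
	o, _ := json.MarshalIndent(old, "", "  ")
	n, _ := json.MarshalIndent(new, "", "  ")

	nice := nicediff.Diff(string(o), string(n), true)
	for _, s := range strings.Split(nice, "\n") {
		// " is not needed in the value to understand the diff
		log.Debugf(strings.ReplaceAll(s, "\"", ""))
	}
}
```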
@@ -193,16 +194,17 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate bool, reasons []string) {
 			util.NameFromMeta(old.ObjectMeta),
 		)
 	}
-	if !reflect.DeepEqual(old.Annotations, new.Annotations) {
-		c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations))
-	}
-	c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
+
+	c.logger.Debugf("Statefulset spec is different")
+	logNiceDiff(c.logger, old.Spec, new.Spec)
+
+	if !reflect.DeepEqual(old.Annotations, new.Annotations) {
+		c.logger.Debugf("metadata.annotations are different")
+		logNiceDiff(c.logger, old.Annotations, new.Annotations)
+	}
 
 	if len(reasons) > 0 {
 		for _, reason := range reasons {
-			c.logger.Infof("reason: %q", reason)
+			c.logger.Infof("reason: %s", reason)
 		}
 	}
 }
@@ -217,7 +219,8 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) {
 			role, util.NameFromMeta(old.ObjectMeta),
 		)
 	}
-	c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec))
+
+	logNiceDiff(c.logger, old.Spec, new.Spec)
 
 	if reason != "" {
 		c.logger.Infof("reason: %s", reason)
@@ -226,7 +229,7 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) {
 
 func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
 	c.logger.Infof("volume specification has been changed")
-	c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new))
+	logNiceDiff(c.logger, old, new)
 }
 
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
@@ -76,6 +76,10 @@ func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller {
 	logger := logrus.New()
 	if controllerConfig.EnableJsonLogging {
 		logger.SetFormatter(&logrus.JSONFormatter{})
+	} else {
+		if os.Getenv("LOG_NOQUOTE") != "" {
+			logger.SetFormatter(&logrus.TextFormatter{PadLevelText: true, DisableQuote: true})
+		}
 	}
 
 	var myComponentName = "postgres-operator"
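PadLevelText and DisableQuote are stock logrus TextFormatter options: padding makes every level tag the same width so multi-line output, such as the per-line diffs above, stays column-aligned, and DisableQuote stops logrus from wrapping values containing spaces in quotes. A small standalone demo; the LOG_NOQUOTE gate mirrors the hunk above:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	// opt in via environment variable instead of changing the default formatter
	if os.Getenv("LOG_NOQUOTE") != "" {
		logger.SetFormatter(&logrus.TextFormatter{
			PadLevelText: true, // pad "info"/"warning" to equal width so columns align
			DisableQuote: true, // do not quote multi-word field values
		})
	}
	logger.Info("new node has been added")
	logger.Warning("defined CPU limit is below the required minimum")
}
```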
@@ -84,7 +88,10 @@ func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller {
 	}
 
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(logger.Infof)
+
+	// disable sending events to the log output as well;
+	// the operator currently duplicates a lot of log entries with this setup
+	// eventBroadcaster.StartLogging(logger.Infof)
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: myComponentName})
 
 	c := &Controller{
@@ -42,7 +42,7 @@ func (c *Controller) nodeAdd(obj interface{}) {
 		return
 	}
 
-	c.logger.Debugf("new node has been added: %q (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID)
+	c.logger.Debugf("new node has been added: %s (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID)
 
 	// check if the node became not ready while the operator was down (otherwise we would have caught it in nodeUpdate)
 	if !c.nodeIsReady(node) {