Initial implementation of the statefulset annotation indicating rolling updates.
This commit is contained in:
parent 43a1db2128
commit ce0d4af91c
.gitignore
@@ -6,6 +6,7 @@
 # Folders
 _obj
 _test
+_manifests
 
 # Architecture specific extensions/prefixes
 *.[568vq]
pkg/cluster/cluster.go
@@ -78,7 +78,6 @@ type Cluster struct {
     currentProcess spec.Process
     processMu      sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex
     specMu         sync.RWMutex // protects the spec for reporting, no need to hold the master mutex
-    pendingRollingUpdate *bool // indicates the cluster needs a rolling update
 }
 
 type compareStatefulsetResult struct {
@@ -115,7 +114,6 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql
     deleteOptions:        &metav1.DeleteOptions{OrphanDependents: &orphanDependents},
     podEventsQueue:       podEventsQueue,
     KubeClient:           kubeClient,
-    pendingRollingUpdate: nil,
 }
 cluster.logger = logger.WithField("pkg", "cluster").WithField("cluster-name", cluster.clusterName())
 cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)
@@ -251,7 +249,6 @@ func (c *Cluster) Create() error {
     }()
 
     c.setStatus(spec.ClusterStatusCreating)
-    c.setPendingRollingUpgrade(false)
 
     for _, role := range []PostgresRole{Master, Replica} {
@@ -301,7 +298,7 @@ func (c *Cluster) Create() error {
     if c.Statefulset != nil {
         return fmt.Errorf("statefulset already exists in the cluster")
     }
-    ss, err = c.createStatefulSet()
+    ss, err = c.createStatefulSet(false)
     if err != nil {
         return fmt.Errorf("could not create statefulset: %v", err)
     }
@@ -345,6 +342,10 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
         match = false
         reasons = append(reasons, "new statefulset's number of replicas doesn't match the current one")
     }
+    if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
+        match = false
+        reasons = append(reasons, "new statefulset's annotations doesn't match the current one")
+    }
     if len(c.Statefulset.Spec.Template.Spec.Containers) != len(statefulSet.Spec.Template.Spec.Containers) {
         needsRollUpdate = true
         reasons = append(reasons, "new statefulset's container specification doesn't match the current one")
@@ -396,9 +397,10 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
     }
 
     if !reflect.DeepEqual(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations) {
-        needsRollUpdate = true
+        match = false
         needsReplace = true
-        reasons = append(reasons, "new statefulset's metadata annotations doesn't match the current one")
+        needsRollUpdate = true
+        reasons = append(reasons, "new statefulset's pod template metadata annotations doesn't match the current one")
     }
     if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) {
         needsReplace = true
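For context: compareStatefulSetWith folds every detected difference into one of three escalating outcomes, collecting human-readable reasons along the way. A minimal sketch of the result struct, inferred purely from the usages visible in this diff (cmp.match, cmp.replace, cmp.rollingUpdate, cmp.reasons); the actual definition in cluster.go may differ:

// Inferred sketch, not the literal repository definition.
type compareStatefulsetResult struct {
    match         bool     // false: the live statefulset deviates from the desired one
    replace       bool     // true: the object cannot be patched in place and must be recreated
    rollingUpdate bool     // true: running pods must be recreated to pick up the change
    reasons       []string // human-readable explanation for each mismatch
}

Note the asymmetry this commit introduces: drift in the object-level annotations only clears match (a patch repairs it), while drift in the pod template's annotations also sets needsReplace and needsRollUpdate, because running pods were created from the old template.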
pkg/cluster/resources.go
@@ -17,6 +17,10 @@ import (
     "github.com/zalando-incubator/postgres-operator/pkg/util/retryutil"
 )
 
+const (
+    RollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update"
+)
+
 func (c *Cluster) listResources() error {
     if c.PodDisruptionBudget != nil {
         c.logger.Infof("found pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta), c.PodDisruptionBudget.UID)
@@ -59,9 +63,38 @@ func (c *Cluster) listResources() error {
     return nil
 }
 
-func (c *Cluster) createStatefulSet() (*v1beta1.StatefulSet, error) {
+func setRollingUpdateFlag(sset *v1beta1.StatefulSet, val bool) {
+    anno := sset.GetAnnotations()
+    fmt.Printf("rolling upgrade flag has been set to %t", val)
+    if anno == nil {
+        anno = make(map[string]string)
+    }
+    anno[RollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
+    sset.SetAnnotations(anno)
+}
+
+func getRollingUpdateFlag(sset *v1beta1.StatefulSet, defaultValue bool) (flag bool) {
+    anno := sset.GetAnnotations()
+    flag = defaultValue
+
+    stringFlag, exists := anno[RollingUpdateStatefulsetAnnotationKey]
+    if exists {
+        var err error
+        if flag, err = strconv.ParseBool(stringFlag); err != nil {
+            fmt.Printf("error when parsing %s annotation for the statefulset %s: expected boolean value, got %s\n",
+                RollingUpdateStatefulsetAnnotationKey,
+                types.NamespacedName{sset.Namespace, sset.Name},
+                stringFlag)
+            flag = defaultValue
+        }
+    }
+    return flag
+}
+
+func (c *Cluster) createStatefulSet(pendingRollingUpgrade bool) (*v1beta1.StatefulSet, error) {
     c.setProcessName("creating statefulset")
     statefulSetSpec, err := c.generateStatefulSet(&c.Spec)
+    setRollingUpdateFlag(statefulSetSpec, pendingRollingUpgrade)
     if err != nil {
         return nil, fmt.Errorf("could not generate statefulset: %v", err)
     }
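These two helpers round-trip the flag through metadata.annotations, which is what lets the pending-rolling-update state survive operator restarts. A self-contained sketch of that round-trip; the key string comes from this commit, while the k8s.io/api/apps/v1beta1 import path and everything else is an assumption for illustration:

package main

import (
    "fmt"
    "strconv"

    "k8s.io/api/apps/v1beta1"
)

const rollingUpdateKey = "zalando-postgres-operator-rolling-update"

func main() {
    sset := &v1beta1.StatefulSet{}

    // set: record the flag in metadata.annotations
    anno := sset.GetAnnotations()
    if anno == nil {
        anno = make(map[string]string)
    }
    anno[rollingUpdateKey] = strconv.FormatBool(true)
    sset.SetAnnotations(anno)

    // get: parse it back, falling back to a default on a malformed value
    flag := false
    if s, ok := sset.GetAnnotations()[rollingUpdateKey]; ok {
        if v, err := strconv.ParseBool(s); err == nil {
            flag = v
        }
    }
    fmt.Println(flag) // true
}

Two things stand out in the committed helpers: createStatefulSet stamps the flag on statefulSetSpec before checking the error from generateStatefulSet, which would panic if a nil spec ever accompanies an error, and both helpers report through fmt.Printf rather than the cluster logger.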
@@ -128,7 +161,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *v1beta1.StatefulSet) error {
     return nil
 }
 
-func (c *Cluster) updateStatefulSet(newStatefulSet *v1beta1.StatefulSet) error {
+func (c *Cluster) updateStatefulSet(newStatefulSet *v1beta1.StatefulSet, includeAnnotations bool) error {
     c.setProcessName("updating statefulset")
     if c.Statefulset == nil {
         return fmt.Errorf("there is no statefulset in the cluster")
@@ -153,8 +186,19 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *v1beta1.StatefulSet) error {
         types.MergePatchType,
         patchData, "")
     if err != nil {
-        return fmt.Errorf("could not patch statefulset %q: %v", statefulSetName, err)
+        return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err)
     }
+    if includeAnnotations && newStatefulSet.Annotations != nil {
+        patchData := metadataAnnotationsPatch(newStatefulSet.Annotations)
+        statefulSet, err = c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(
+            c.Statefulset.Name,
+            types.StrategicMergePatchType,
+            []byte(patchData), "")
+        if err != nil {
+            return fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err)
+        }
+    }
 
     c.Statefulset = statefulSet
 
     return nil
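metadataAnnotationsPatch is referenced here but not shown in the diff; presumably it renders a JSON patch body that mentions only metadata.annotations, so this second patch cannot disturb the spec patched just above. A hypothetical sketch of such a helper (assumes "encoding/json" and "fmt" are imported; the real one in the repository may differ):

// Hypothetical metadataAnnotationsPatch-style helper, illustration only.
func metadataAnnotationsPatch(annotations map[string]string) string {
    encoded, err := json.Marshal(annotations)
    if err != nil {
        return "{}" // a map[string]string cannot realistically fail to marshal
    }
    // Only metadata.annotations is mentioned, so a strategic merge patch
    // built from this string leaves every other field of the object untouched.
    return fmt.Sprintf(`{"metadata":{"annotations":%s}}`, encoded)
}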
pkg/cluster/sync.go
@@ -220,7 +220,9 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 }
 
 func (c *Cluster) syncStatefulSet() error {
+    var (
+        cachedRollingUpdateFlag, podsRollingUpdateRequired bool
+    )
     sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{})
     if err != nil {
         if !k8sutil.ResourceNotFound(err) {
@@ -234,7 +236,8 @@ func (c *Cluster) syncStatefulSet() error {
             return fmt.Errorf("could not list pods of the statefulset: %v", err)
         }
 
-        sset, err = c.createStatefulSet()
+        podsRollingUpdateRequired := (len(pods) > 0)
+        sset, err = c.createStatefulSet(podsRollingUpdateRequired)
         if err != nil {
             return fmt.Errorf("could not create missing statefulset: %v", err)
         }
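One subtlety in this hunk: := declares a new podsRollingUpdateRequired scoped to the statefulset-missing branch, shadowing the variable introduced in the var block at the top of syncStatefulSet. A standalone illustration of the behavior:

package main

import "fmt"

func main() {
    flag := false // outer variable, like the one in syncStatefulSet's var block
    if true {
        flag := true      // ':=' creates a new inner variable that shadows it
        fmt.Println(flag) // true
    }
    fmt.Println(flag) // false: the outer variable was never assigned
}

If the code after the branch reads the outer variable, the pods-exist signal computed here never reaches it; a plain assignment (=) would be the safer spelling.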
@@ -244,36 +247,42 @@ func (c *Cluster) syncStatefulSet() error {
         }
 
         c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta))
-        if len(pods) <= 0 {
-            return nil
-        }
-        c.logger.Infof("found pods without the statefulset: trigger rolling update")
-        c.setPendingRollingUpgrade(true)
 
     } else {
+        if c.Statefulset != nil {
+            // if we reset the rolling update flag in the statefulset structure in memory but didn't manage to update
+            // the actual object in Kubernetes for some reason we want to avoid doing an unnecessary update by relying
+            // on the 'cached' in-memory flag.
+            cachedRollingUpdateFlag = getRollingUpdateFlag(c.Statefulset, true)
+            c.logger.Debugf("cached statefulset value exists, rollingUpdate flag is %t", cachedRollingUpdateFlag)
+        }
         // statefulset is already there, make sure we use its definition in order to compare with the spec.
         c.Statefulset = sset
-        // resolve the pending rolling upgrade flags as soon as we read an actual statefulset from kubernetes.
-        // we must do it before updating statefulsets; after an update, the statfulset will receive a new
-        // updateRevision, different from the one the pods run with.
-        if err := c.resolvePendingRollingUpdate(sset); err != nil {
-            return fmt.Errorf("could not resolve the rolling upgrade status: %v", err)
+        if podsRollingUpdateRequired = getRollingUpdateFlag(c.Statefulset, false); podsRollingUpdateRequired {
+            if cachedRollingUpdateFlag {
+                c.logger.Infof("found a statefulset with an unfinished pods rolling update")
+            } else {
+                c.logger.Infof("clearing the rolling update flag based on the cached information")
+                podsRollingUpdateRequired = false
+            }
         }
     }
 
     desiredSS, err := c.generateStatefulSet(&c.Spec)
     if err != nil {
         return fmt.Errorf("could not generate statefulset: %v", err)
     }
+    setRollingUpdateFlag(desiredSS, podsRollingUpdateRequired)
 
     cmp := c.compareStatefulSetWith(desiredSS)
     if !cmp.match {
-        if cmp.rollingUpdate {
-            c.setPendingRollingUpgrade(true)
+        if cmp.rollingUpdate && !podsRollingUpdateRequired {
+            podsRollingUpdateRequired = true
+            setRollingUpdateFlag(desiredSS, podsRollingUpdateRequired)
         }
         c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons)
 
         if !cmp.replace {
-            if err := c.updateStatefulSet(desiredSS); err != nil {
+            if err := c.updateStatefulSet(desiredSS, true); err != nil {
                 return fmt.Errorf("could not update statefulset: %v", err)
             }
         } else {
@@ -285,15 +294,17 @@ func (c *Cluster) syncStatefulSet() error {
     }
     // if we get here we also need to re-create the pods (either leftovers from the old
     // statefulset or those that got their configuration from the outdated statefulset)
-    if *c.pendingRollingUpdate {
+    if podsRollingUpdateRequired {
         c.logger.Debugln("performing rolling update")
         if err := c.recreatePods(); err != nil {
             return fmt.Errorf("could not recreate pods: %v", err)
         }
-        c.setPendingRollingUpgrade(false)
         c.logger.Infof("pods have been recreated")
+        setRollingUpdateFlag(c.Statefulset, false)
+        if err := c.updateStatefulSet(c.Statefulset, true); err != nil {
+            c.logger.Warningf("could not clear rolling update for the statefulset")
+        }
     }
 
     return nil
 }
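Taken together, syncStatefulSet now derives its rolling-update decision from three inputs: orphaned pods discovered while re-creating a missing statefulset, the annotation already stored on the live object, and the spec comparison. A condensed paraphrase of that decision, not the literal code (it omits the cachedRollingUpdateFlag cross-check, which only guards against trusting an annotation that a half-finished earlier sync failed to persist; assumes the same v1beta1 and strconv imports as the sketches above):

// Condensed paraphrase of the post-commit decision flow, illustration only.
func rollingUpdateNeeded(liveSS *v1beta1.StatefulSet, orphanedPods int, specNeedsRollUpdate bool) bool {
    required := false
    if liveSS == nil {
        // statefulset was missing: pre-existing pods must be folded into
        // the freshly created one via a rolling update
        required = orphanedPods > 0
    } else if s, ok := liveSS.Annotations["zalando-postgres-operator-rolling-update"]; ok {
        // resume whatever an earlier, interrupted sync recorded on the object
        required, _ = strconv.ParseBool(s)
    }
    // a spec change can switch the flag on, never off
    return required || specNeedsRollUpdate
}

Once the pods have actually been recreated, the flag is written back as false and patched to the API server, closing the loop; if clearing it fails, only a warning is logged, and the next sync will see a stale true and repeat the rolling update, which is redundant but safe.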
pkg/cluster/util.go
@@ -39,10 +39,6 @@ func NewSecretOauthTokenGetter(kubeClient *k8sutil.KubernetesClient,
     return &SecretOauthTokenGetter{kubeClient, OAuthTokenSecretName}
 }
 
-const (
-    podControllerRevisionHashLabel = "controller-revision-hash"
-)
-
 func (g *SecretOauthTokenGetter) getOAuthToken() (string, error) {
     //TODO: we can move this function to the Controller in case it will be needed there. As for now we use it only in the Cluster
     // Temporary getting postgresql-operator secret from the NamespaceDefault
@@ -462,56 +458,3 @@ func (c *Cluster) GetSpec() (*spec.Postgresql, error) {
 func (c *Cluster) patroniUsesKubernetes() bool {
     return c.OpConfig.EtcdHost == ""
 }
-
-func (c *Cluster) setPendingRollingUpgrade(val bool) {
-    if c.pendingRollingUpdate == nil {
-        c.pendingRollingUpdate = new(bool)
-    }
-    *c.pendingRollingUpdate = val
-    c.logger.Debugf("pending rolling upgrade was set to %b", val)
-}
-
-// resolvePendingRollingUpdate figures out if rolling upgrade is necessary
-// based on the states of the cluster statefulset and pods
-func (c *Cluster) resolvePendingRollingUpdate(sset *v1beta1.StatefulSet) error {
-    // XXX: it looks like we will always trigger a rolling update if the
-    // pods are on a different revision from a statefulset, even if the
-    // statefulset change that caused it didn't require a rolling update
-    // originally.
-    if c.pendingRollingUpdate != nil {
-        return nil
-    }
-    c.logger.Debugf("evaluating rolling upgrade requirement")
-    effectiveRevision := sset.Status.UpdateRevision
-    if effectiveRevision == "" {
-        if sset.Status.CurrentRevision == "" {
-            c.logger.Debugf("statefulset doesn't have a current revision, no rolling upgrade")
-            // the statefulset does not have a currentRevision, it must be new; hence, no rollingUpdate
-            c.setPendingRollingUpgrade(false)
-            return nil
-        }
-        effectiveRevision = sset.Status.CurrentRevision
-    }
-
-    // fetch all pods related to this cluster
-    pods, err := c.listPods()
-    if err != nil {
-        return err
-    }
-    // check their revisions
-    for _, pod := range pods {
-        podRevision, present := pod.Labels[podControllerRevisionHashLabel]
-        // empty or missing revision indicates a new pod - doesn't need a rolling upgrade
-        if !present || podRevision == "" {
-            continue
-        }
-        c.logger.Debugf("observing pod revision %q vs statefulset revision %q", podRevision, effectiveRevision)
-        if podRevision != effectiveRevision {
-            // pod is on a different revision - trigger the rolling upgrade
-            c.setPendingRollingUpgrade(true)
-            return nil
-        }
-    }
-    c.setPendingRollingUpgrade(false)
-    return nil
-}
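The block deleted above was the previous strategy: hold a *bool in memory and, after an operator restart, reconstruct it by comparing each pod's controller-revision-hash label with the statefulset's update revision, with the acknowledged (XXX) caveat that any revision skew forced a rolling update whether or not the underlying change required one. With the annotation, a restarted operator recovers the state with a single read. A minimal client-go sketch, assuming the pre-1.17 Get signature that matches this code's vintage; clientset, namespace, and name are placeholders:

// Sketch: recover the pending-rolling-update state after an operator restart.
// Assumes imports: strconv, k8s.io/client-go/kubernetes,
// and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
func pendingRollingUpdate(clientset kubernetes.Interface, namespace, name string) (bool, error) {
    sset, err := clientset.AppsV1beta1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
    if s, ok := sset.Annotations["zalando-postgres-operator-rolling-update"]; ok {
        if v, err := strconv.ParseBool(s); err == nil {
            return v, nil
        }
    }
    return false, nil // absent or malformed annotation: no pending update
}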