Refactor SetPostgresCRDStatus to handle additional status fields
This commit is contained in:
parent 9efa6a1eb4
commit 24724dc4ed
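Summary: callers used to assemble a full acidv1.PostgresStatus value by hand — status string, instance count, label selector, observed generation, and the existing conditions — and pass it to SetPostgresCRDStatus. After this commit they pass only the status constant and the cluster-name label key; the client fetches the current CR and derives the remaining status fields itself (see the k8sutil hunks at the end of the diff). The signature changes accordingly:

-func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pgStatus apiacidv1.PostgresStatus, message string) (*apiacidv1.Postgresql, error)
+func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, clusterNameLabel string, message string) (*apiacidv1.Postgresql, error)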
@@ -270,33 +270,16 @@ func (c *Cluster) Create() (err error) {
         ss *appsv1.StatefulSet
     )
 
-    // Even though it's possible to propagate other CR labels to the pods, we pick the default label here since it is propagated to all the pods by default. But this means that for the scale subresource to work properly, the user must set the "cluster-name" key in their CRs with a value matching the CR name.
-    labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
-
     defer func() {
         var (
             pgUpdatedStatus *acidv1.Postgresql
             errStatus       error
         )
         if err == nil {
-            ClusterStatus := acidv1.PostgresStatus{
-                PostgresClusterStatus: acidv1.ClusterStatusRunning,
-                NumberOfInstances:     c.Postgresql.Spec.NumberOfInstances,
-                LabelSelector:         labelstring,
-                ObservedGeneration:    c.Postgresql.Generation,
-                Conditions:            c.Postgresql.Status.Conditions,
-            }
-            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") //TODO: are you sure it's running?
+            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.OpConfig.ClusterNameLabel, "") //TODO: are you sure it's running?
         } else {
             c.logger.Warningf("cluster created failed: %v", err)
-            ClusterStatus := acidv1.PostgresStatus{
-                PostgresClusterStatus: acidv1.ClusterStatusAddFailed,
-                NumberOfInstances:     0,
-                LabelSelector:         labelstring,
-                ObservedGeneration:    0,
-                Conditions:            c.Postgresql.Status.Conditions,
-            }
-            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error())
+            pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, c.OpConfig.ClusterNameLabel, err.Error())
         }
         if errStatus != nil {
             c.logger.Warningf("could not set cluster status: %v", errStatus)
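Note on the label selector: the removed comment explained that the selector had to be built from the CR's "cluster-name" label value, which in turn had to match the CR name for the scale subresource to work. That Sprintf now lives inside the client and is keyed off the CR name directly, so the caller-side computation (and the comment) can go away:

    // inside the new SetPostgresCRDStatus (see the k8sutil hunk below)
    if pgStatus.LabelSelector == "" {
        pgStatus.LabelSelector = fmt.Sprintf("%s=%s", clusterNameLabel, pg.Name)
    }

The same mechanical replacement — drop the locally built PostgresStatus, pass the status constant plus the ClusterNameLabel key — repeats in the Creating, Updating, UpdateFailed/Running, SyncFailed, Invalid, and event-queue paths in the hunks that follow.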
@@ -306,14 +289,7 @@ func (c *Cluster) Create() (err error) {
         }
     }()
 
-    ClusterStatus := acidv1.PostgresStatus{
-        PostgresClusterStatus: acidv1.ClusterStatusCreating,
-        NumberOfInstances:     0,
-        LabelSelector:         labelstring,
-        ObservedGeneration:    0,
-        Conditions:            c.Postgresql.Status.Conditions,
-    }
-    pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
+    pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, c.OpConfig.ClusterNameLabel, "")
     if err != nil {
         return fmt.Errorf("could not set cluster status: %v", err)
     }
@@ -993,15 +969,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
     c.mu.Lock()
     defer c.mu.Unlock()
 
-    labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
-    ClusterStatus := acidv1.PostgresStatus{
-        PostgresClusterStatus: acidv1.ClusterStatusUpdating,
-        NumberOfInstances:     c.Postgresql.Status.NumberOfInstances,
-        LabelSelector:         labelstring,
-        ObservedGeneration:    c.Postgresql.Status.ObservedGeneration,
-        Conditions:            c.Postgresql.Status.Conditions,
-    }
-    c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
+    c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.OpConfig.ClusterNameLabel, "")
 
     if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
         // do not apply any major version related changes yet
@@ -1016,23 +984,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
         err             error
     )
     if updateFailed {
-        ClusterStatus := acidv1.PostgresStatus{
-            PostgresClusterStatus: acidv1.ClusterStatusUpdateFailed,
-            NumberOfInstances:     c.Postgresql.Status.NumberOfInstances,
-            LabelSelector:         labelstring,
-            ObservedGeneration:    c.Postgresql.Status.ObservedGeneration,
-            Conditions:            c.Postgresql.Status.Conditions,
-        }
-        pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error())
+        pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.OpConfig.ClusterNameLabel, err.Error())
     } else {
-        ClusterStatus := acidv1.PostgresStatus{
-            PostgresClusterStatus: acidv1.ClusterStatusRunning,
-            NumberOfInstances:     newSpec.Spec.NumberOfInstances,
-            LabelSelector:         labelstring,
-            ObservedGeneration:    c.Postgresql.Generation,
-            Conditions:            c.Postgresql.Status.Conditions,
-        }
-        pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
+        pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.OpConfig.ClusterNameLabel, "")
     }
     if err != nil {
         c.logger.Warningf("could not set cluster status: %v", err)
@@ -47,26 +47,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
         pgUpdatedStatus *acidv1.Postgresql
         errStatus       error
     )
-    labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
     if err != nil {
         c.logger.Warningf("error while syncing cluster state: %v", err)
-        ClusterStatus := acidv1.PostgresStatus{
-            PostgresClusterStatus: acidv1.ClusterStatusSyncFailed,
-            NumberOfInstances:     newSpec.Status.NumberOfInstances,
-            LabelSelector:         labelstring,
-            ObservedGeneration:    c.Postgresql.Status.ObservedGeneration,
-            Conditions:            c.Postgresql.Status.Conditions,
-        }
-        pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, errStatus.Error())
+        pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, c.OpConfig.ClusterNameLabel, err.Error())
     } else if !c.Status.Running() {
-        ClusterStatus := acidv1.PostgresStatus{
-            PostgresClusterStatus: acidv1.ClusterStatusRunning,
-            NumberOfInstances:     newSpec.Spec.NumberOfInstances,
-            LabelSelector:         labelstring,
-            ObservedGeneration:    c.Postgresql.Generation,
-            Conditions:            c.Postgresql.Status.Conditions,
-        }
-        pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
+        pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.OpConfig.ClusterNameLabel, "")
     }
     if errStatus != nil {
         c.logger.Warningf("could not set cluster status: %v", errStatus)
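In passing, the Sync failure path fixes a latent bug: the old code passed errStatus.Error() as the message, but errStatus is still nil at that point (it only receives a value from this very call), so building the message would have panicked. The new call passes err.Error(), the actual sync error.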
@@ -161,15 +161,7 @@ func (c *Controller) acquireInitialListOfClusters() error {
 func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
     if c.opConfig.EnableTeamIdClusternamePrefix {
         if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
-            labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, pgSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel])
-            ClusterStatus := acidv1.PostgresStatus{
-                PostgresClusterStatus: acidv1.ClusterStatusInvalid,
-                NumberOfInstances:     pgSpec.Status.NumberOfInstances,
-                LabelSelector:         labelstring,
-                ObservedGeneration:    pgSpec.Status.ObservedGeneration,
-                Conditions:            pgSpec.Status.Conditions,
-            }
-            c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, err.Error())
+            c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, c.opConfig.ClusterNameLabel, err.Error())
             return nil, err
         }
     }
@@ -475,26 +467,15 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.Postgresql, eventType EventType) {
 
     if clusterError != "" && eventType != EventDelete {
         c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError)
-        labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, informerNewSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel])
-        ClusterStatus := acidv1.PostgresStatus{
-            NumberOfInstances:  informerNewSpec.Status.NumberOfInstances,
-            LabelSelector:      labelstring,
-            ObservedGeneration: informerNewSpec.Status.ObservedGeneration,
-            Conditions:         informerNewSpec.Status.Conditions,
-        }
-
         switch eventType {
         case EventAdd:
-            ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
-            c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
+            c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, c.opConfig.ClusterNameLabel, clusterError)
             c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError)
         case EventUpdate:
-            ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
-            c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
+            c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, c.opConfig.ClusterNameLabel, clusterError)
            c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError)
         default:
-            ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
-            c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
+            c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, c.opConfig.ClusterNameLabel, clusterError)
             c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError)
         }
 
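With the struct construction gone, the controller no longer reads any status fields (NumberOfInstances, ObservedGeneration, Conditions) from informerNewSpec — the informer's possibly stale cache copy — at all; the client derives them from a freshly fetched CR instead. The switch shrinks to picking the right failure constant per event type.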
@@ -192,11 +192,21 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
 }
 
 // SetPostgresCRDStatus of Postgres cluster
-func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pgStatus apiacidv1.PostgresStatus, message string) (*apiacidv1.Postgresql, error) {
+func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, clusterNameLabel string, message string) (*apiacidv1.Postgresql, error) {
     var pg *apiacidv1.Postgresql
+    var pgStatus apiacidv1.PostgresStatus
 
-    newConditions := updateConditions(pgStatus.Conditions, pgStatus.PostgresClusterStatus, message)
-    pgStatus.Conditions = newConditions
+    pg, err := client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Get(context.TODO(), clusterName.Name, metav1.GetOptions{})
+    if err != nil {
+        return nil, fmt.Errorf("could not fetch Postgres CR %s/%s: %v", clusterName.Namespace, clusterName.Name, err)
+    }
+
+    pgStatus = updateConditions(pg, status, message)
+    if pgStatus.LabelSelector == "" {
+        pgStatus.LabelSelector = fmt.Sprintf("%s=%s", clusterNameLabel, pg.Name)
+    }
+
+    pgStatus.PostgresClusterStatus = status
 
     patch, err := json.Marshal(struct {
         PgStatus interface{} `json:"status"`
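Taken together, the new client call reads as: fetch the CR, fold the status constant and message into its conditions via updateConditions, default the label selector from the label key and the CR name, then patch the status subresource. A minimal caller-side sketch, where syncErr is a hypothetical variable standing in for whatever error the caller holds:

    // Hypothetical call site; the argument order matches the new signature.
    pgUpdated, errStatus := c.KubeClient.SetPostgresCRDStatus(
        c.clusterName(),                // spec.NamespacedName of the Postgresql CR
        acidv1.ClusterStatusSyncFailed, // status constant stored in status.PostgresClusterStatus
        c.OpConfig.ClusterNameLabel,    // label key used to default status.labelSelector
        syncErr.Error(),                // message folded into the status conditions
    )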
@@ -218,8 +228,10 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pgStatus apiacidv1.PostgresStatus, message string) (*apiacidv1.Postgresql, error) {
     return pg, nil
 }
 
-func updateConditions(existingConditions apiacidv1.Conditions, currentStatus string, message string) apiacidv1.Conditions {
+func updateConditions(existingPg *apiacidv1.Postgresql, currentStatus string, message string) apiacidv1.PostgresStatus {
     now := apiacidv1.VolatileTime{Inner: metav1.NewTime(time.Now())}
+    existingStatus := existingPg.Status
+    existingConditions := existingStatus.Conditions
     var readyCondition, reconciliationCondition *apiacidv1.Condition
 
     // Find existing conditions
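For orientation, the condition types referenced here look roughly like the sketch below. This is a reconstruction from the fields the diff actually touches (Type, Status, LastTransitionTime), not the authoritative apiacidv1 definitions, which may carry more fields such as a message or reason:

    // Sketch only — reconstructed from usage in these hunks.
    type Condition struct {
        Type               string             // "Ready" or "ReconciliationSuccessful"
        Status             v1.ConditionStatus // v1.ConditionTrue / v1.ConditionFalse
        LastTransitionTime VolatileTime       // wraps a metav1.Time; set from `now` above
    }
    type Conditions []Condition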
@@ -231,21 +243,6 @@ func updateConditions(existingConditions apiacidv1.Conditions, currentStatus string, message string) apiacidv1.Conditions {
         }
     }
 
-    // Initialize conditions if not present
-    switch currentStatus {
-    case "Creating":
-        if reconciliationCondition == nil {
-            existingConditions = append(existingConditions, apiacidv1.Condition{Type: "ReconciliationSuccessful"})
-            reconciliationCondition = &existingConditions[len(existingConditions)-1]
-        }
-    default:
-        if readyCondition == nil {
-            existingConditions = append(existingConditions, apiacidv1.Condition{Type: "Ready"})
-            readyCondition = &existingConditions[len(existingConditions)-1]
-        }
-    }
-
     // Safety checks to avoid nil pointer dereference
     if readyCondition == nil {
         readyCondition = &apiacidv1.Condition{Type: "Ready"}
@@ -262,15 +259,23 @@ func updateConditions(existingConditions apiacidv1.Conditions, currentStatus string, message string) apiacidv1.Conditions {
     case "Running":
         readyCondition.Status = v1.ConditionTrue
         readyCondition.LastTransitionTime = now
+        existingPg.Status.NumberOfInstances = existingPg.Spec.NumberOfInstances
+        existingPg.Status.ObservedGeneration = existingPg.Generation
     case "CreateFailed":
         readyCondition.Status = v1.ConditionFalse
         readyCondition.LastTransitionTime = now
+        existingPg.Status.NumberOfInstances = 0
+        existingPg.Status.ObservedGeneration = 0
     case "UpdateFailed", "SyncFailed", "Invalid":
         if readyCondition.Status == v1.ConditionFalse {
             readyCondition.LastTransitionTime = now
+            existingPg.Status.NumberOfInstances = existingStatus.NumberOfInstances
+            existingPg.Status.ObservedGeneration = existingStatus.ObservedGeneration
         }
     case "Updating":
-        // not updatinf time, just setting the status
+        existingPg.Status.NumberOfInstances = existingStatus.NumberOfInstances
+        existingPg.Status.ObservedGeneration = existingStatus.ObservedGeneration
+        // not updating time, just setting the status
         if readyCondition.Status == v1.ConditionFalse {
             readyCondition.Status = v1.ConditionFalse
         } else {
@@ -297,7 +302,19 @@ func updateConditions(existingConditions apiacidv1.Conditions, currentStatus string, message string) apiacidv1.Conditions {
         }
     }
 
-    return existingConditions
+    if currentStatus == "Creating" {
+        existingPg.Status.NumberOfInstances = 0
+        existingPg.Status.ObservedGeneration = 0
+        for i := range existingConditions {
+            if existingConditions[i].Type == "Ready" {
+                existingConditions = append(existingConditions[:i], existingConditions[i+1:]...)
+                break
+            }
+        }
+    }
+    existingPg.Status.Conditions = existingConditions
+
+    return existingPg.Status
 }
 
 // SetFinalizer of Postgres cluster
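Net effect of the updateConditions rewrite: instead of taking a conditions slice and returning an updated slice, it now takes the whole fetched CR, mutates its status in place, and returns that status for the patch. As visible in these hunks, the derived fields fall out per status roughly as follows:

    Running:                             NumberOfInstances = Spec.NumberOfInstances, ObservedGeneration = Generation, Ready -> True
    CreateFailed:                        both fields reset to 0, Ready -> False
    UpdateFailed / SyncFailed / Invalid: previous status values carried over; transition time bumped only while Ready is already False
    Updating:                            previous status values carried over; transition time left untouched
    Creating:                            both fields reset to 0, and any existing "Ready" condition is removed from the list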