Minor changes and fixes for unit tests

Signed-off-by: RavinaChidambaram <cravina.19@gmail.com>
This commit is contained in:
RavinaChidambaram 2024-08-29 18:25:51 +05:30
parent 30ed723c95
commit 0269432560
6 changed files with 110 additions and 40 deletions

View File

@@ -23,9 +23,7 @@ const (
OperatorConfigCRDResourceList = OperatorConfigCRDResouceKind + "List"
OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "." + acidzalando.GroupName
OperatorConfigCRDResourceShort = "opconfig"
)
var (
specReplicasPath = ".spec.numberOfInstances"
statusReplicasPath = ".status.numberOfInstances"
labelSelectorPath = ".status.labelSelector"

View File

@@ -228,7 +228,7 @@ type UserFlags []string
type Conditions []Condition
type ConditionType string
type PostgresqlConditionType string
type VolatileTime struct {
Inner metav1.Time `json:",inline"`
}
@@ -254,7 +254,7 @@ func init() {
// Condition contains the conditions of the PostgreSQL cluster
type Condition struct {
Type ConditionType `json:"type" description:"type of status condition"`
Type PostgresqlConditionType `json:"type" description:"type of status condition"`
Status v1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
LastTransitionTime VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"`
Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`

View File

@@ -256,19 +256,32 @@ func (c *Cluster) Create() (err error) {
)
// Even though it's possible to propagate other CR labels to the pods, picking the default label here since it's propagated to all the pods by default. But this means that in order for the scale subresource to work properly, the user must set the "cluster-name" key in their CRs with a value matching the CR name.
labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"]) //TODO: make this configurable.
labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
defer func() {
var (
pgUpdatedStatus *acidv1.Postgresql
errStatus error
)
existingConditions := c.Postgresql.Status.Conditions
if err == nil {
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, c.Postgresql.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "") //TODO: are you sure it's running?
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusRunning,
NumberOfInstances: c.Postgresql.Spec.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Generation,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "") //TODO: are you sure it's running?
} else {
c.logger.Warningf("cluster created failed: %v", err)
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed, 0, labelstring, 0, existingConditions, err.Error())
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusAddFailed,
NumberOfInstances: 0,
LabelSelector: labelstring,
ObservedGeneration: 0,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error())
}
if errStatus != nil {
c.logger.Warningf("could not set cluster status: %v", errStatus)
@@ -278,8 +291,14 @@ func (c *Cluster) Create() (err error) {
}
}()
existingConditions := c.Postgresql.Status.Conditions
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating, 0, labelstring, 0, existingConditions, "")
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusCreating,
NumberOfInstances: 0,
LabelSelector: labelstring,
ObservedGeneration: 0,
Conditions: c.Postgresql.Status.Conditions,
}
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
if err != nil {
return fmt.Errorf("could not set cluster status: %v", err)
}
@@ -932,9 +951,15 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
c.mu.Lock()
defer c.mu.Unlock()
labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"])
existingConditions := c.Postgresql.Status.Conditions
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, "")
labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusUpdating,
NumberOfInstances: c.Postgresql.Status.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Status.ObservedGeneration,
Conditions: c.Postgresql.Status.Conditions,
}
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
c.setSpec(newSpec)
defer func() {
@@ -943,9 +968,23 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
err error
)
if updateFailed {
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed, c.Postgresql.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, err.Error())
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusUpdateFailed,
NumberOfInstances: c.Postgresql.Status.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Status.ObservedGeneration,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, err.Error())
} else {
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "")
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusRunning,
NumberOfInstances: newSpec.Spec.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Generation,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
}
if err != nil {
c.logger.Warningf("could not set cluster status: %v", err)

View File

@@ -46,13 +46,26 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
pgUpdatedStatus *acidv1.Postgresql
errStatus error
)
labelstring := fmt.Sprintf("%s=%s", "cluster-name", c.Postgresql.ObjectMeta.Labels["cluster-name"])
existingConditions := c.Postgresql.Status.Conditions
labelstring := fmt.Sprintf("%s=%s", c.OpConfig.ClusterNameLabel, c.Postgresql.ObjectMeta.Labels[c.OpConfig.ClusterNameLabel])
if err != nil {
c.logger.Warningf("error while syncing cluster state: %v", err)
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed, newSpec.Status.NumberOfInstances, labelstring, c.Postgresql.Status.ObservedGeneration, existingConditions, errStatus.Error())
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusSyncFailed,
NumberOfInstances: newSpec.Status.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Status.ObservedGeneration,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, errStatus.Error())
} else if !c.Status.Running() {
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning, newSpec.Spec.NumberOfInstances, labelstring, c.Postgresql.Generation, existingConditions, "")
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusRunning,
NumberOfInstances: newSpec.Spec.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: c.Postgresql.Generation,
Conditions: c.Postgresql.Status.Conditions,
}
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), ClusterStatus, "")
}
if errStatus != nil {
c.logger.Warningf("could not set cluster status: %v", errStatus)

View File

@@ -161,9 +161,15 @@ func (c *Controller) acquireInitialListOfClusters() error {
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
if c.opConfig.EnableTeamIdClusternamePrefix {
if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
labelstring := fmt.Sprintf("%s=%s", "cluster-name", pgSpec.ObjectMeta.Labels["cluster-name"])
existingConditions := pgSpec.Status.Conditions
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid, pgSpec.Status.NumberOfInstances, labelstring, pgSpec.Status.ObservedGeneration, existingConditions, err.Error())
labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, pgSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel])
ClusterStatus := acidv1.PostgresStatus{
PostgresClusterStatus: acidv1.ClusterStatusInvalid,
NumberOfInstances: pgSpec.Status.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: pgSpec.Status.ObservedGeneration,
Conditions: pgSpec.Status.Conditions,
}
c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, err.Error())
return nil, err
}
}
@@ -209,10 +215,10 @@ func (c *Controller) processEvent(event ClusterEvent) {
if event.EventType == EventRepair {
runRepair, lastOperationStatus := cl.NeedsRepair()
if !runRepair {
lg.Debugf("observed cluster status %s, repair is not required", lastOperationStatus)
lg.Debugf("observed cluster status %#v, repair is not required", lastOperationStatus)
return
}
lg.Debugf("observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus)
lg.Debugf("observed cluster status %#v, running sync scan to repair the cluster", lastOperationStatus)
event.EventType = EventSync
}
@@ -474,18 +480,26 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
if clusterError != "" && eventType != EventDelete {
c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError)
labelstring := fmt.Sprintf("%s=%s", "cluster-name", informerNewSpec.ObjectMeta.Labels["cluster-name"])
existingConditions := informerNewSpec.Status.Conditions
labelstring := fmt.Sprintf("%s=%s", c.opConfig.ClusterNameLabel, informerNewSpec.ObjectMeta.Labels[c.opConfig.ClusterNameLabel])
ClusterStatus := acidv1.PostgresStatus{
NumberOfInstances: informerNewSpec.Status.NumberOfInstances,
LabelSelector: labelstring,
ObservedGeneration: informerNewSpec.Status.ObservedGeneration,
Conditions: informerNewSpec.Status.Conditions,
}
switch eventType {
case EventAdd:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError)
ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError)
case EventUpdate:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError)
ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError)
default:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed, informerNewSpec.Status.NumberOfInstances, labelstring, informerNewSpec.Status.ObservedGeneration, existingConditions, clusterError)
ClusterStatus.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
c.KubeClient.SetPostgresCRDStatus(clusterName, ClusterStatus, clusterError)
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError)
}

View File

@@ -193,15 +193,10 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
}
// SetPostgresCRDStatus of Postgres cluster
func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string, numberOfInstances int32, labelSelector string, observedGeneration int64, existingConditions apiacidv1.Conditions, message string) (*apiacidv1.Postgresql, error) {
func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pgStatus apiacidv1.PostgresStatus, message string) (*apiacidv1.Postgresql, error) {
var pg *apiacidv1.Postgresql
pgStatus := apiacidv1.PostgresStatus{}
pgStatus.PostgresClusterStatus = status
pgStatus.NumberOfInstances = numberOfInstances
pgStatus.LabelSelector = labelSelector
pgStatus.ObservedGeneration = observedGeneration
newConditions := updateConditions(existingConditions, status, message)
newConditions := updateConditions(pgStatus.Conditions, pgStatus.PostgresClusterStatus, message)
pgStatus.Conditions = newConditions
patch, err := json.Marshal(struct {
@@ -252,6 +247,17 @@ func updateConditions(existingConditions apiacidv1.Conditions, currentStatus str
}
}
// Safety checks to avoid nil pointer dereference
if readyCondition == nil {
readyCondition = &apiacidv1.Condition{Type: "Ready"}
existingConditions = append(existingConditions, *readyCondition)
}
if reconciliationCondition == nil {
reconciliationCondition = &apiacidv1.Condition{Type: "ReconciliationSuccessful"}
existingConditions = append(existingConditions, *reconciliationCondition)
}
// Update Ready condition
switch currentStatus {
case "Running":