diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index d740260d2..e97843373 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
+	"strconv"
 	"sync"
 	"time"
 
@@ -633,6 +634,28 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}()
 
+	// delete persistent volume claim after scale down
+	if oldSpec.Spec.NumberOfInstances > newSpec.Spec.NumberOfInstances {
+		c.logger.Debug("deleting pvc of shut down pods")
+
+		for i := oldSpec.Spec.NumberOfInstances - 1; i >= newSpec.Spec.NumberOfInstances; i-- {
+
+			// Scaling down to 0 replicas is not cluster deletion so keep the last pvc.
+			// Operator will remove it only when explicit "kubectl pg delete" is issued
+			if i == 0 {
+				c.logger.Info("cluster scaled down to 0 pods; skipping deletion of the last pvc")
+				break
+			}
+
+			podIndex := strconv.Itoa(int(i))
+			pvcName := "pgdata-" + c.Name + "-" + podIndex
+			if err := c.KubeClient.PersistentVolumeClaims(c.Namespace).Delete(pvcName, c.deleteOptions); err != nil {
+				c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
+				// failing to delete pvc does not fail the update; Sync() may also delete unused PVCs later
+			}
+		}
+	}
+
 	// pod disruption budget
 	if oldSpec.Spec.NumberOfInstances != newSpec.Spec.NumberOfInstances {
 		c.logger.Debug("syncing pod disruption budgets")
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index b04ff863b..ffe1cb28e 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -2,6 +2,7 @@ package cluster
 
 import (
 	"fmt"
+	"strconv"
 
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	v1 "k8s.io/api/core/v1"
@@ -108,6 +109,24 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}
 
+	// remove unused PVCs in case deleting them during scale down failed; see Update()
+	// the last pvc stays until the cluster is explicitly deleted as opposed to being scaled down to 0 pods
+	if c.getNumberOfInstances(&c.Spec) > 0 {
+
+		for i := c.getNumberOfInstances(&c.Spec); ; i++ {
+			podIndex := strconv.Itoa(int(i))
+			pvcName := "pgdata-" + c.Name + "-" + podIndex
+			if err := c.KubeClient.PersistentVolumeClaims(c.Namespace).Delete(pvcName, c.deleteOptions); err != nil {
+				if k8sutil.ResourceNotFound(err) {
+					// no more pvcs to delete
+					break
+				}
+				c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
+				break // give up to avoid an endless loop on persistent errors; next Sync() or Update() will retry
+			}
+		}
+	}
+
 	return err
 }