diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 7d6b9be07..209402a20 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -1804,6 +1804,26 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
 	return podTemplate, nil
 }
 
+// Return an array of ownerReferences to make an arbitrary object dependent on
+// the StatefulSet. The dependency is on the StatefulSet rather than on the
+// PostgreSQL CRD because the former represents the actual state, and only its
+// deletion means we delete the cluster (e.g. if the CRD was deleted but the
+// StatefulSet somehow survived, we must not delete the object, since that
+// would affect the functioning cluster).
+func (c *Cluster) ownerReferences() []metav1.OwnerReference {
+	controller := true
+
+	return []metav1.OwnerReference{
+		{
+			UID:        c.Statefulset.ObjectMeta.UID,
+			APIVersion: "apps/v1",
+			Kind:       "StatefulSet",
+			Name:       c.Statefulset.ObjectMeta.Name,
+			Controller: &controller,
+		},
+	}
+}
+
 func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
 	*appsv1.Deployment, error) {
 
@@ -1823,17 +1843,13 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
 			Namespace:   c.Namespace,
 			Labels:      c.labelsSet(true),
 			Annotations: map[string]string{},
-			// make Postgresql CRD object its owner, so that if CRD object is
-			// deleted, this object will be deleted even if something went
-			// wrong and operator didn't deleted it.
-			OwnerReferences: []metav1.OwnerReference{
-				{
-					UID:        c.Statefulset.ObjectMeta.UID,
-					APIVersion: "apps/v1",
-					Kind:       "StatefulSet",
-					Name:       c.Statefulset.ObjectMeta.Name,
-				},
-			},
+			// make the StatefulSet object its owner to represent the
+			// dependency. The StatefulSet itself is deleted with the
+			// "Orphaned" propagation policy, which means its deletion will
+			// not clean up this deployment, but the object can still be
+			// garbage collected if something went wrong and the operator
+			// didn't delete it.
+			OwnerReferences: c.ownerReferences(),
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: numberOfInstances,
@@ -1866,17 +1882,13 @@ func (c *Cluster) generateConnPoolService(spec *acidv1.PostgresSpec) *v1.Service
 			Namespace:   c.Namespace,
 			Labels:      c.labelsSet(true),
 			Annotations: map[string]string{},
-			// make Postgresql CRD object its owner, so that if CRD object is
-			// deleted, this object will be deleted even if something went
-			// wrong and operator didn't deleted it.
-			OwnerReferences: []metav1.OwnerReference{
-				{
-					UID:        c.Postgresql.ObjectMeta.UID,
-					APIVersion: acidv1.APIVersion,
-					Kind:       acidv1.PostgresqlKind,
-					Name:       c.Postgresql.ObjectMeta.Name,
-				},
-			},
+			// make the StatefulSet object its owner to represent the
+			// dependency. The StatefulSet itself is deleted with the
+			// "Orphaned" propagation policy, which means its deletion will
+			// not clean up this service, but the object can still be
+			// garbage collected if something went wrong and the operator
+			// didn't delete it.
+			OwnerReferences: c.ownerReferences(),
 		},
 		Spec: serviceSpec,
 	}
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index 7baa96c02..b4c7e578f 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -154,10 +154,14 @@ func (c *Cluster) deleteConnectionPool() (err error) {
 		return nil
 	}
 
+	// set the delete propagation policy to foreground, so that the
+	// ReplicaSet will also be deleted.
+	policy := metav1.DeletePropagationForeground
+	options := metav1.DeleteOptions{PropagationPolicy: &policy}
 	deployment := c.ConnectionPool.Deployment
 	err = c.KubeClient.
 		Deployments(deployment.Namespace).
-		Delete(deployment.Name, c.deleteOptions)
+		Delete(deployment.Name, &options)
 
 	if !k8sutil.ResourceNotFound(err) {
 		c.logger.Debugf("Connection pool deployment was already deleted")
@@ -168,10 +172,12 @@ func (c *Cluster) deleteConnectionPool() (err error) {
 	c.logger.Infof("Connection pool deployment %q has been deleted",
 		util.NameFromMeta(deployment.ObjectMeta))
 
+	// set the delete propagation policy to foreground, so that all the
+	// dependent objects will be deleted as well.
 	service := c.ConnectionPool.Service
 	err = c.KubeClient.
 		Services(service.Namespace).
-		Delete(service.Name, c.deleteOptions)
+		Delete(service.Name, &options)
 
 	if !k8sutil.ResourceNotFound(err) {
 		c.logger.Debugf("Connection pool service was already deleted")
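
A minimal sketch (not part of the patch) of how the foreground propagation
policy is passed to client-go, assuming the same pre-context-aware Delete
signature the patch uses; deleteWithDependents and its parameters are
hypothetical placeholder names:

	package cleanup

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// deleteWithDependents removes a deployment with the "Foreground"
	// propagation policy: the garbage collector deletes the owned
	// ReplicaSets (and their pods) before the deployment object itself
	// disappears.
	func deleteWithDependents(client kubernetes.Interface, namespace, name string) error {
		// hypothetical helper; the operator goes through c.KubeClient instead
		policy := metav1.DeletePropagationForeground
		options := metav1.DeleteOptions{PropagationPolicy: &policy}

		return client.AppsV1().
			Deployments(namespace).
			Delete(name, &options)
	}

Deleting with the "Orphan" policy instead would leave the ReplicaSet behind,
which is the very situation the comments above describe for the StatefulSet:
it is orphan-deleted on purpose, so the connection pool objects are removed
explicitly here, with the owner reference kept as a garbage-collection
fallback.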