Enable specifying PVC retention policy for auto deletion (#2343)
* Enable specifying PVC retention policy for auto deletion
* Enable StatefulSetAutoDeletePVC in the e2e kind cluster featureGates
* Skip the node affinity test
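The two new options, when_deleted and when_scaled, are translated by the operator into the StatefulSet field spec.persistentVolumeClaimRetentionPolicy, which Kubernetes only honors when the StatefulSetAutoDeletePVC feature gate is enabled (hence the kind cluster change below). A minimal sketch of that mapping using the upstream appsv1 types, illustrative only and not operator code:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Illustrative only: the operator derives this struct from the two config
	// keys; "delete" selects the Delete policy type, anything else falls back to Retain.
	policy := appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, // when_deleted: "retain"
		WhenScaled:  appsv1.DeletePersistentVolumeClaimRetentionPolicyType, // when_scaled: "delete"
	}
	fmt.Printf("%+v\n", policy)
}

With when_scaled set to delete, the PVCs of pods removed by a scale-down are cleaned up automatically, which is exactly what the new e2e test below exercises.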
parent 552bd26c0f
commit 36389b27bc
@@ -278,6 +278,19 @@ spec:
              pdb_name_format:
                type: string
                default: "postgres-{cluster}-pdb"
              persistent_volume_claim_retention_policy:
                type: object
                properties:
                  when_deleted:
                    type: string
                    enum:
                      - "delete"
                      - "retain"
                  when_scaled:
                    type: string
                    enum:
                      - "delete"
                      - "retain"
              pod_antiaffinity_preferred_during_scheduling:
                type: boolean
                default: false
@@ -165,6 +165,10 @@ configKubernetes:

  # defines the template for PDB (Pod Disruption Budget) names
  pdb_name_format: "postgres-{cluster}-pdb"
  # specify the PVC retention policy when scaling down and/or deleting
  persistent_volume_claim_retention_policy:
    when_deleted: "retain"
    when_scaled: "retain"
  # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
  pod_antiaffinity_preferred_during_scheduling: false
  # override topology key for pod anti affinity
@@ -4,3 +4,5 @@ nodes:
- role: control-plane
- role: worker
- role: worker
featureGates:
  StatefulSetAutoDeletePVC: true
@@ -202,6 +202,9 @@ class K8s:
        return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
            namespace, label_selector=labels).items)

    def count_pvcs_with_label(self, labels, namespace='default'):
        return len(self.api.core_v1.list_namespaced_persistent_volume_claim(namespace, label_selector=labels).items)

    def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
        return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
@@ -506,6 +509,9 @@ class K8sBase:
        return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
            namespace, label_selector=labels).items)

    def count_pvcs_with_label(self, labels, namespace='default'):
        return len(self.api.core_v1.list_namespaced_persistent_volume_claim(namespace, label_selector=labels).items)

    def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
        return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
@@ -1200,6 +1200,67 @@ class EndToEndTestCase(unittest.TestCase):

        self.eventuallyEqual(check_version_14, "14", "Version was not upgraded to 14")

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_persistent_volume_claim_retention_policy(self):
        '''
        Test the retention policy for persistent volume claims
        '''
        k8s = self.k8s
        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'

        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "Number of PVCs does not match number of instances")

        # patch the PVC retention policy to delete PVCs when scaling down
        patch_scaled_policy_delete = {
            "data": {
                "persistent_volume_claim_retention_policy": "when_deleted:retain,when_scaled:delete"
            }
        }
        k8s.update_config(patch_scaled_policy_delete)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

        pg_patch_scale_down_instances = {
            'spec': {
                'numberOfInstances': 1
            }
        }
        # decrease the number of instances
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 1, "PVC was not deleted when scaling down")

        pg_patch_scale_up_instances = {
            'spec': {
                'numberOfInstances': 2
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_up_instances)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "Number of PVCs does not match number of instances")

        # reset the retention policy to retain
        patch_scaled_policy_retain = {
            "data": {
                "persistent_volume_claim_retention_policy": "when_deleted:retain,when_scaled:retain"
            }
        }
        k8s.update_config(patch_scaled_policy_retain)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

        # decrease the number of instances
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "PVCs were deleted when scaling down despite the retain policy")

        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_up_instances)
        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
        self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 2, "Number of PVCs does not match number of instances")

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_resource_generation(self):
        '''
@@ -1297,6 +1358,7 @@ class EndToEndTestCase(unittest.TestCase):
        time.sleep(5)

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    @unittest.skip("Skipping this test until fixed")
    def test_node_affinity(self):
        '''
        Add label to a node and update postgres cluster spec to deploy only on a node with that label
@@ -117,6 +117,7 @@ data:
  # password_rotation_interval: "90"
  # password_rotation_user_retention: "180"
  pdb_name_format: "postgres-{cluster}-pdb"
  persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:retain"
  # pod_antiaffinity_preferred_during_scheduling: "false"
  # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  pod_deletion_wait_timeout: 10m
@@ -276,6 +276,19 @@ spec:
              pdb_name_format:
                type: string
                default: "postgres-{cluster}-pdb"
              persistent_volume_claim_retention_policy:
                type: object
                properties:
                  when_deleted:
                    type: string
                    enum:
                      - "delete"
                      - "retain"
                  when_scaled:
                    type: string
                    enum:
                      - "delete"
                      - "retain"
              pod_antiaffinity_preferred_during_scheduling:
                type: boolean
                default: false
@@ -84,6 +84,9 @@ configuration:
    # node_readiness_label_merge: "OR"
    oauth_token_secret_name: postgresql-operator
    pdb_name_format: "postgres-{cluster}-pdb"
    persistent_volume_claim_retention_policy:
      when_deleted: "retain"
      when_scaled: "retain"
    pod_antiaffinity_preferred_during_scheduling: false
    pod_antiaffinity_topology_key: "kubernetes.io/hostname"
    # pod_environment_configmap: "default/my-custom-config"
@@ -1388,6 +1388,33 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
					"pdb_name_format": {
						Type: "string",
					},
					"persistent_volume_claim_retention_policy": {
						Type: "object",
						Properties: map[string]apiextv1.JSONSchemaProps{
							"when_deleted": {
								Type: "string",
								Enum: []apiextv1.JSON{
									{
										Raw: []byte(`"delete"`),
									},
									{
										Raw: []byte(`"retain"`),
									},
								},
							},
							"when_scaled": {
								Type: "string",
								Enum: []apiextv1.JSON{
									{
										Raw: []byte(`"delete"`),
									},
									{
										Raw: []byte(`"retain"`),
									},
								},
							},
						},
					},
					"pod_antiaffinity_preferred_during_scheduling": {
						Type: "boolean",
					},
@@ -100,6 +100,7 @@ type KubernetesMetaConfiguration struct {
	PodAntiAffinityPreferredDuringScheduling bool              `json:"pod_antiaffinity_preferred_during_scheduling,omitempty"`
	PodAntiAffinityTopologyKey               string            `json:"pod_antiaffinity_topology_key,omitempty"`
	PodManagementPolicy                      string            `json:"pod_management_policy,omitempty"`
	PersistentVolumeClaimRetentionPolicy     map[string]string `json:"persistent_volume_claim_retention_policy,omitempty"`
	EnableReadinessProbe                     bool              `json:"enable_readiness_probe,omitempty"`
	EnableCrossNamespaceSecret               bool              `json:"enable_cross_namespace_secret,omitempty"`
}
@@ -409,6 +409,12 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
		reasons = append(reasons, "new statefulset's pod management policy does not match")
	}

	if !reflect.DeepEqual(c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy, statefulSet.Spec.PersistentVolumeClaimRetentionPolicy) {
		match = false
		needsReplace = true
		reasons = append(reasons, "new statefulset's persistent volume claim retention policy does not match")
	}

	needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
	needsRollUpdate, reasons = c.compareContainers("containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons)
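Worth noting: the comparison uses reflect.DeepEqual on a pointer field, and the generated spec (see the next hunk) always sets a policy. A live StatefulSet without one, for example an object created before this change or running on a cluster where the feature gate is disabled and the API server drops the field, would therefore be flagged for replacement. A small standalone sketch of that pointer comparison, under those assumptions and not taken from operator code:

package main

import (
	"fmt"
	"reflect"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// current: policy never set on the live object; desired: explicit Retain/Retain.
	var current *appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy // nil
	desired := &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
	}
	fmt.Println(reflect.DeepEqual(current, desired)) // false -> needsReplace
}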
@@ -1440,6 +1440,19 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
	}

	var persistentVolumeClaimRetentionPolicy appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy
	if c.OpConfig.PersistentVolumeClaimRetentionPolicy["when_deleted"] == "delete" {
		persistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
	} else {
		persistentVolumeClaimRetentionPolicy.WhenDeleted = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
	}

	if c.OpConfig.PersistentVolumeClaimRetentionPolicy["when_scaled"] == "delete" {
		persistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.DeletePersistentVolumeClaimRetentionPolicyType
	} else {
		persistentVolumeClaimRetentionPolicy.WhenScaled = appsv1.RetainPersistentVolumeClaimRetentionPolicyType
	}

	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: c.statefulSetName(),
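The mapping above treats any value other than "delete", including typos, as retain. The CRD enum guards the OperatorConfiguration path, but the ConfigMap path is plain text. A stricter, purely hypothetical variant (parsePVCRetentionValue is a made-up helper, not part of this commit) could reject unknown values instead:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// parsePVCRetentionValue is a hypothetical helper: it maps a config value to
// the upstream policy type and errors out on anything unexpected instead of
// silently falling back to "retain".
func parsePVCRetentionValue(v string) (appsv1.PersistentVolumeClaimRetentionPolicyType, error) {
	switch v {
	case "delete":
		return appsv1.DeletePersistentVolumeClaimRetentionPolicyType, nil
	case "", "retain":
		return appsv1.RetainPersistentVolumeClaimRetentionPolicyType, nil
	default:
		return "", fmt.Errorf("unsupported PVC retention policy value %q", v)
	}
}

func main() {
	p, err := parsePVCRetentionValue("delete")
	fmt.Println(p, err) // Delete <nil>
}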
@@ -1448,13 +1461,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
 		},
 		Spec: appsv1.StatefulSetSpec{
-			Replicas:             &numberOfInstances,
-			Selector:             c.labelsSelector(),
-			ServiceName:          c.serviceName(Master),
-			Template:             *podTemplate,
-			VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
-			UpdateStrategy:       updateStrategy,
-			PodManagementPolicy:  podManagementPolicy,
+			Replicas:                             &numberOfInstances,
+			Selector:                             c.labelsSelector(),
+			ServiceName:                          c.serviceName(Master),
+			Template:                             *podTemplate,
+			VolumeClaimTemplates:                 []v1.PersistentVolumeClaim{*volumeClaimTemplate},
+			UpdateStrategy:                       updateStrategy,
+			PodManagementPolicy:                  podManagementPolicy,
+			PersistentVolumeClaimRetentionPolicy: &persistentVolumeClaimRetentionPolicy,
 		},
 	}
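To confirm the operator propagated the policy, the generated StatefulSet can be read back from the cluster. A sketch using client-go, assuming the default kubeconfig and the e2e cluster acid-minimal-cluster in namespace default:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumptions: kubeconfig at the default path; the StatefulSet is named
	// after the postgresql cluster, as in the e2e setup.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	sts, err := client.AppsV1().StatefulSets("default").Get(context.TODO(), "acid-minimal-cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if p := sts.Spec.PersistentVolumeClaimRetentionPolicy; p != nil {
		fmt.Printf("whenDeleted=%s whenScaled=%s\n", p.WhenDeleted, p.WhenScaled)
	} else {
		fmt.Println("no persistentVolumeClaimRetentionPolicy set (feature gate off or older operator)")
	}
}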
@@ -119,6 +119,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.NodeReadinessLabelMerge = fromCRD.Kubernetes.NodeReadinessLabelMerge
	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
	result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready")
	result.PersistentVolumeClaimRetentionPolicy = fromCRD.Kubernetes.PersistentVolumeClaimRetentionPolicy
	result.EnableReadinessProbe = fromCRD.Kubernetes.EnableReadinessProbe
	result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m")
	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
@@ -246,6 +246,7 @@ type Config struct {
	PatroniAPICheckInterval              time.Duration     `name:"patroni_api_check_interval" default:"1s"`
	PatroniAPICheckTimeout               time.Duration     `name:"patroni_api_check_timeout" default:"5s"`
	EnablePatroniFailsafeMode            *bool             `name:"enable_patroni_failsafe_mode" default:"false"`
	PersistentVolumeClaimRetentionPolicy map[string]string `name:"persistent_volume_claim_retention_policy" default:"when_deleted:retain,when_scaled:retain"`
}

// MustMarshal marshals the config or panics
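The default here is a flat "key:value,key:value" string that the operator's config loader decodes into the map[string]string field; the same format is what the e2e test patches into the operator ConfigMap. A standalone sketch of that decoding, for illustration only (the operator relies on its own config package for this):

package main

import (
	"fmt"
	"strings"
)

// parsePolicyString illustrates the "when_deleted:retain,when_scaled:retain"
// format; it is a sketch, not the operator's actual parser.
func parsePolicyString(s string) map[string]string {
	out := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, ":", 2)
		if len(kv) == 2 {
			out[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return out
}

func main() {
	fmt.Println(parsePolicyString("when_deleted:retain,when_scaled:delete"))
	// Output: map[when_deleted:retain when_scaled:delete]
}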