Add topologySpreadConstraints configuration to pod spec.
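
For context, a minimal sketch of a postgresql manifest using the new field (not part of this commit; the cluster name and surrounding spec values are illustrative). The labelSelector can be left out, since the operator injects the cluster labels via generateTopologySpreadConstraints in the diff below:

apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  numberOfInstances: 2
  # ...remaining spec fields omitted...
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      # labelSelector is injected by the operator from the cluster labels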
commit 263242a5e1
parent fa4bc21538

@@ -1646,7 +1646,6 @@ class EndToEndTestCase(unittest.TestCase):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
         pooler_name = 'acid-minimal-cluster-pooler'
@@ -2385,6 +2384,56 @@ class EndToEndTestCase(unittest.TestCase):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_topology_spread_constraints(self):
+        '''
+           Enable topologySpreadConstraints for pods
+        '''
+        k8s = self.k8s
+        cluster_labels = "application=spilo,cluster-name=acid-minimal-cluster"
+
+        # Verify we are in good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # Label the nodes with a zone for topologySpreadConstraints
+        patch_node_label = {
+            "metadata": {
+                "labels": {
+                    "topology.kubernetes.io/zone": "zalando"
+                }
+            }
+        }
+        k8s.api.core_v1.patch_node(master_nodes[0], patch_node_label)
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_node_label)
+
+        # Scale out postgresql pods
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 6}})
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_labels), 6, "Postgresql StatefulSet is scaled to 6")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 6, "All pods are running")
+
+        worker_node_1 = 0
+        worker_node_2 = 0
+        pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_labels)
+        for pod in pods.items:
+            if pod.spec.node_name == 'postgres-operator-e2e-tests-worker':
+                worker_node_1 += 1
+            elif pod.spec.node_name == 'postgres-operator-e2e-tests-worker2':
+                worker_node_2 += 1
+
+        self.assertEqual(worker_node_1, worker_node_2)
+        self.assertEqual(worker_node_1, 3)
+        self.assertEqual(worker_node_2, 3)
+
+        # Scale postgresql pods back to the previous number of replicas
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 2}})
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_cluster_deletion(self):
         '''
@@ -2460,7 +2509,7 @@ class EndToEndTestCase(unittest.TestCase):
             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted")
             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted")
             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 8, "Secrets were deleted although disabled in config")
-            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config")
+            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 6, "PVCs were deleted although disabled in config")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))

@@ -582,6 +582,12 @@ spec:
                         - PreferNoSchedule
                     tolerationSeconds:
                       type: integer
+              topologySpreadConstraints:
+                type: array
+                nullable: true
+                items:
+                  type: object
+                  x-kubernetes-preserve-unknown-fields: true
               useLoadBalancer:
                 type: boolean
                 description: deprecated

@@ -895,6 +895,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
							},
						},
					},
+					"topologySpreadConstraints": {
+						Type:     "array",
+						Nullable: true,
+						Items: &apiextv1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1.JSONSchemaProps{
+								Type:                   "object",
+								XPreserveUnknownFields: util.True(),
+							},
+						},
+					},
					"useLoadBalancer": {
						Type:        "boolean",
						Description: "deprecated",

@@ -70,6 +70,7 @@ type PostgresSpec struct {
	PreparedDatabases         map[string]PreparedDatabase   `json:"preparedDatabases,omitempty"`
	SchedulerName             *string                       `json:"schedulerName,omitempty"`
	NodeAffinity              *v1.NodeAffinity              `json:"nodeAffinity,omitempty"`
+	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
	Tolerations               []v1.Toleration               `json:"tolerations,omitempty"`
	Sidecars                  []Sidecar                     `json:"sidecars,omitempty"`
	InitContainers            []v1.Container                `json:"initContainers,omitempty"`

@@ -499,6 +499,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
		needsRollUpdate = true
		reasons = append(reasons, "new statefulset's pod affinity does not match the current one")
	}
+	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.TopologySpreadConstraints, statefulSet.Spec.Template.Spec.TopologySpreadConstraints) {
+		needsReplace = true
+		needsRollUpdate = true
+		reasons = append(reasons, "new statefulset's pod topologySpreadConstraints does not match the current one")
+	}
	if len(c.Statefulset.Spec.Template.Spec.Tolerations) != len(statefulSet.Spec.Template.Spec.Tolerations) {
		needsReplace = true
		needsRollUpdate = true

@@ -604,6 +604,13 @@ func generatePodAntiAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuring
	return podAntiAffinity
}
 
+func generateTopologySpreadConstraints(labels labels.Set, topologySpreadConstraints []v1.TopologySpreadConstraint) []v1.TopologySpreadConstraint {
+	// set the label selector on each constraint in place; iterating by index avoids mutating a copy
+	for i := range topologySpreadConstraints {
+		topologySpreadConstraints[i].LabelSelector = &metav1.LabelSelector{MatchLabels: labels}
+	}
+	return topologySpreadConstraints
+}
+
func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration {
	// allow to override tolerations by postgresql manifest
	if len(*tolerationsSpec) > 0 {
@@ -809,6 +816,7 @@ func (c *Cluster) generatePodTemplate(
	initContainers []v1.Container,
	sidecarContainers []v1.Container,
	sharePgSocketWithSidecars *bool,
+	topologySpreadConstraintsSpec []v1.TopologySpreadConstraint,
	tolerationsSpec *[]v1.Toleration,
	spiloRunAsUser *int64,
	spiloRunAsGroup *int64,
@@ -878,6 +886,10 @@ func (c *Cluster) generatePodTemplate(
		podSpec.PriorityClassName = priorityClassName
	}
 
+	if len(topologySpreadConstraintsSpec) > 0 {
+		podSpec.TopologySpreadConstraints = generateTopologySpreadConstraints(labels, topologySpreadConstraintsSpec)
+	}
+
	if sharePgSocketWithSidecars != nil && *sharePgSocketWithSidecars {
		addVarRunVolume(&podSpec)
	}
@@ -1469,6 +1481,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		initContainers,
		sidecarContainers,
		c.OpConfig.SharePgSocketWithSidecars,
+		spec.TopologySpreadConstraints,
		&tolerationSpec,
		effectiveRunAsUser,
		effectiveRunAsGroup,
@@ -2356,6 +2369,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
		[]v1.Container{},
		[]v1.Container{},
		util.False(),
+		[]v1.TopologySpreadConstraint{},
		&tolerationsSpec,
		nil,
		nil,

@@ -3984,3 +3984,53 @@ func TestGenerateCapabilities(t *testing.T) {
		}
	}
}
+
+func TestTopologySpreadConstraints(t *testing.T) {
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			NumberOfInstances: 1,
+			Resources: &acidv1.Resources{
+				ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+				ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
+			},
+			Volume: acidv1.Volume{
+				Size: "1G",
+			},
+			TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+				{
+					MaxSkew:           int32(1),
+					TopologyKey:       "topology.kubernetes.io/zone",
+					WhenUnsatisfiable: v1.DoNotSchedule,
+				},
+			},
+		},
+	}
+
+	cluster := New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	cluster.labelsSet(true)
+
+	s, err := cluster.generateStatefulSet(&pg.Spec)
+	assert.NoError(t, err)
+	assert.Contains(t, s.Spec.Template.Spec.TopologySpreadConstraints, v1.TopologySpreadConstraint{
+		MaxSkew:           int32(1),
+		TopologyKey:       "topology.kubernetes.io/zone",
+		WhenUnsatisfiable: v1.DoNotSchedule,
+		LabelSelector: &metav1.LabelSelector{
+			MatchLabels: cluster.labelsSet(true),
+		},
+	})
+}