fix TestNodeAffinity in e2e tests
commit 646eb6ec9a
parent ab25fb29b7
@@ -178,7 +178,7 @@ class EndToEndTestCase(unittest.TestCase):
             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
                                 "Operator does not get in sync")

-            # changed security context of postrges container should trigger a rolling update
+            # changed security context of postgres container should trigger a rolling update
             k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

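The hunk above leans on the suite's k8s helper methods wait_for_pod_failover and wait_for_pod_start. A minimal sketch of what such polling helpers could look like, assuming the standard kubernetes Python client; the class name K8sSketch, the polling interval, and the exact readiness checks are illustrative assumptions, not the repository's implementation:

import time

from kubernetes import client, config


class K8sSketch:
    # illustrative stand-in for the e2e suite's k8s helper object
    def __init__(self, namespace='default'):
        config.load_kube_config()  # assumes a reachable test cluster / kubeconfig
        self.core_v1 = client.CoreV1Api()
        self.namespace = namespace

    def get_pods(self, pod_labels):
        # list pods matching a label selector such as 'spilo-role=replica,' + cluster_label
        return self.core_v1.list_namespaced_pod(
            self.namespace, label_selector=pod_labels).items

    def wait_for_pod_start(self, pod_labels, interval=5):
        # poll until every pod matching the selector reports phase Running
        while True:
            pods = self.get_pods(pod_labels)
            if pods and all(p.status.phase == 'Running' for p in pods):
                return
            time.sleep(interval)

    def wait_for_pod_failover(self, expected_nodes, pod_labels, interval=5):
        # poll until the matching pods have moved onto one of the expected nodes,
        # i.e. the rolling update / failover has actually relocated them
        while True:
            pods = self.get_pods(pod_labels)
            if pods and all(p.spec.node_name in expected_nodes for p in pods):
                return
            time.sleep(interval)

Polling loops like these are what the timeout_decorator.TimeoutError handler in the second hunk guards against, so the suite fails with the operator log instead of hanging forever.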
@@ -965,13 +965,9 @@ class EndToEndTestCase(unittest.TestCase):
                 body=patch_node_remove_affinity_config)
             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

-            self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
-            self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
-
-            # remove node affinity to move replica away from master node
-            nm, new_replica_nodes = k8s.get_cluster_nodes()
-            new_master_node = nm[0]
-            self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
+            # node affinity change should cause another rolling update and relocation of replica
+            k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
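For reference, the context line body=patch_node_remove_affinity_config points at a patch built earlier in the test. A plausible sketch of that patch, assuming the operator's acid.zalan.do/v1 postgresqls custom resource and the acid-minimal-cluster name visible above; the exact patch body and API call are assumptions, not copied from the test:

from kubernetes import client, config

config.load_kube_config()
custom_objects_api = client.CustomObjectsApi()

# clearing spec.nodeAffinity lets the pods schedule onto any node again,
# which is what triggers the rolling update the hunk above waits for
patch_node_remove_affinity_config = {
    "spec": {
        "nodeAffinity": None
    }
}

custom_objects_api.patch_namespaced_custom_object(
    group="acid.zalan.do", version="v1", namespace="default",
    plural="postgresqls", name="acid-minimal-cluster",
    body=patch_node_remove_affinity_config)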