rename restartInstances to syncPostgreSQLConfiguration

Felix Kunde 2021-07-30 14:37:43 +02:00
parent 20ca09507a
commit 8d5df0c758
2 changed files with 4 additions and 5 deletions


@@ -852,7 +852,7 @@ class EndToEndTestCase(unittest.TestCase):
try:
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
-    k8s.wait_for_pod_start('spilo-role=replica', self.test_namespace)
+    k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace)
self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
except timeout_decorator.TimeoutError:
@@ -1501,7 +1501,6 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(patch_delete_annotations)
def get_failover_targets(self, master_node, replica_nodes):
'''
If all pods live on the same node, failover will happen to other worker(s)


@@ -394,11 +394,11 @@ func (c *Cluster) syncStatefulSet() error {
masterPod = &pods[i]
continue
}
-    c.restartInstance(&pod)
+    c.syncPostgreSQLConfiguration(&pod)
}
if masterPod != nil {
-    c.restartInstance(masterPod)
+    c.syncPostgreSQLConfiguration(masterPod)
}
// if we get here we also need to re-create the pods (either leftovers from the old
@@ -414,7 +414,7 @@ func (c *Cluster) syncStatefulSet() error {
return nil
}
-func (c *Cluster) restartInstance(pod *v1.Pod) {
+func (c *Cluster) syncPostgreSQLConfiguration(pod *v1.Pod) {
podName := util.NameFromMeta(pod.ObjectMeta)
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
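
For orientation, below is a minimal, self-contained Go sketch of the ordering the sync.go hunks establish for the renamed method: the per-pod configuration sync is applied to every replica pod first and to the master pod last. The pod type, the role detection, and the body of syncPostgreSQLConfiguration here are simplified placeholders chosen for illustration only; they are not the operator's actual types or implementation, which this diff does not show.

package main

import "fmt"

// pod is a stand-in for v1.Pod, reduced to the fields this sketch needs.
type pod struct {
	name string
	role string // "master" or "replica"
}

// syncPostgreSQLConfiguration is a placeholder for the renamed method; in the
// operator it would apply pending PostgreSQL configuration changes to the
// instance running in the given pod.
func syncPostgreSQLConfiguration(p pod) {
	fmt.Println("syncing PostgreSQL configuration on", p.name)
}

// syncAllPods mirrors the loop in the hunk above: replicas are handled as
// they are encountered, the master is remembered and handled last.
func syncAllPods(pods []pod) {
	var master *pod
	for i := range pods {
		if pods[i].role == "master" {
			master = &pods[i]
			continue
		}
		syncPostgreSQLConfiguration(pods[i])
	}
	if master != nil {
		syncPostgreSQLConfiguration(*master)
	}
}

func main() {
	// Hypothetical pod names, used only to exercise the ordering.
	syncAllPods([]pod{
		{name: "acid-test-cluster-0", role: "master"},
		{name: "acid-test-cluster-1", role: "replica"},
	})
}

Handling the master last keeps any disruption to the primary to a single, final step after all replicas already carry the updated configuration.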