minor fix for restart TTL
This commit is contained in:
parent 2f4b554fec
commit db76b8c642
@@ -59,8 +59,8 @@ func (c *Cluster) deleteStreams() error {
 
 func (c *Cluster) syncPostgresConfig() error {
-	desiredPostgresConfig := c.Spec.Patroni
-	slots := desiredPostgresConfig.Slots
+	desiredPatroniConfig := c.Spec.Patroni
+	slots := desiredPatroniConfig.Slots
 
 	for _, stream := range c.Spec.Streams {
 		slotName := c.getLogicalReplicationSlot(stream.Database)
@@ -80,7 +80,7 @@ func (c *Cluster) syncPostgresConfig() error {
 		for slotName, slot := range slots {
 			c.logger.Debugf("creating logical replication slot %q in database %q", slotName, slot["database"])
 		}
-		desiredPostgresConfig.Slots = slots
+		desiredPatroniConfig.Slots = slots
 	} else {
 		return nil
 	}
@@ -94,13 +94,13 @@ func (c *Cluster) syncPostgresConfig() error {
 	}
 	for i, pod := range pods {
 		podName := util.NameFromMeta(pods[i].ObjectMeta)
-		effectivePostgresConfig, effectivePgParameters, err := c.patroni.GetConfig(&pod)
+		effectivePatroniConfig, effectivePgParameters, err := c.patroni.GetConfig(&pod)
 		if err != nil {
 			c.logger.Warningf("could not get Postgres config from pod %s: %v", podName, err)
 			continue
 		}
 
-		_, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, effectivePostgresConfig, desiredPostgresConfig, effectivePgParameters, desiredPgParameters)
+		_, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, effectivePatroniConfig, desiredPatroniConfig, effectivePgParameters, desiredPgParameters)
 		if err != nil {
 			c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
 			continue
@@ -271,7 +271,7 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 func (c *Cluster) syncStatefulSet() error {
 	var (
 		masterPod               *v1.Pod
-		effectivePostgresConfig map[string]interface{}
+		restartTTL              uint32
 		instanceRestartRequired bool
 	)
 
@@ -404,7 +404,6 @@ func (c *Cluster) syncStatefulSet() error {
 		emptyPatroniConfig := acidv1.Patroni{}
 		podName := util.NameFromMeta(pods[i].ObjectMeta)
 		patroniConfig, pgParameters, err := c.patroni.GetConfig(&pod)
-
 		if err != nil {
 			c.logger.Warningf("could not get Postgres config from pod %s: %v", podName, err)
 			continue
@@ -418,6 +417,7 @@ func (c *Cluster) syncStatefulSet() error {
 				c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
 				continue
 			}
+			restartTTL = patroniConfig.TTL
 			break
 		}
 	}
@@ -425,10 +425,6 @@ func (c *Cluster) syncStatefulSet() error {
 	// if the config update requires a restart, call Patroni restart for replicas first, then master
 	if instanceRestartRequired {
 		c.logger.Debug("restarting Postgres server within pods")
-		ttl, ok := effectivePostgresConfig["ttl"].(int32)
-		if !ok {
-			ttl = 30
-		}
 		for i, pod := range pods {
 			role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
 			if role == Master {
@@ -436,7 +432,7 @@ func (c *Cluster) syncStatefulSet() error {
 				continue
 			}
 			c.restartInstance(&pod)
-			time.Sleep(time.Duration(ttl) * time.Second)
+			time.Sleep(time.Duration(restartTTL) * time.Second)
 		}
 
 		if masterPod != nil {
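Note on the fix (an illustrative aside, not part of the diff): the removed fallback read the restart delay out of a generic map[string]interface{} with a type assertion to int32. If that map is populated by decoding JSON (which is how a Patroni /config response is typically consumed), Go's encoding/json represents numbers as float64, so an int32 assertion never succeeds and the hardcoded 30-second fallback always applies. A minimal standalone sketch of that behaviour, assuming a JSON-decoded config map:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A config payload decoded into a generic map, the shape the old code worked with.
	var cfg map[string]interface{}
	_ = json.Unmarshal([]byte(`{"ttl": 30}`), &cfg)

	// Old pattern: assert the value to int32. encoding/json stores JSON numbers
	// as float64, so the assertion fails and ok is false.
	ttl, ok := cfg["ttl"].(int32)
	fmt.Println(ttl, ok) // 0 false -> the hardcoded 30-second fallback always applied

	// Typed alternative: decode into a struct and read the field directly,
	// analogous to restartTTL = patroniConfig.TTL in the diff above.
	var typed struct {
		TTL uint32 `json:"ttl"`
	}
	_ = json.Unmarshal([]byte(`{"ttl": 30}`), &typed)
	fmt.Println(typed.TTL) // 30
}

Capturing the value from the typed Patroni config, as restartTTL = patroniConfig.TTL does, avoids the assertion and sleeps for the TTL actually reported by Patroni.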