Set statefulset update and management policy explicitly (#515)

* fix logging in retry

* explicitly set the stateful set update strategy to OnDelete

* add podManagementPolicy
Sergey Dudoladov 2019-03-13 11:49:18 +01:00 committed by GitHub
parent db72d82f14
commit 0b53dbe5dc
5 changed files with 24 additions and 1 deletion

View File

@@ -226,6 +226,11 @@ configuration they are grouped under the `kubernetes` key.
[topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels)
for pod anti affinity. The default is `kubernetes.io/hostname`.
* **pod_management_policy**
specifies the
[pod management policy](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies)
of stateful sets of PG clusters. The default is `ordered_ready`; the other possible value is `parallel`.
## Kubernetes resource requests
This group allows you to configure resource requests for the Postgres pods.

View File

@@ -44,6 +44,7 @@ data:
pod_terminate_grace_period: 5m
pod_deletion_wait_timeout: 10m
pod_label_wait_timeout: 10m
pod_management_policy: "ordered_ready"
ready_wait_interval: 3s
ready_wait_timeout: 30s
# master_pod_move_timeout: 10m

View File

@@ -858,6 +858,20 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
numberOfInstances := c.getNumberOfInstances(spec)
// the operator has domain-specific logic on how to do rolling updates of PG clusters
// so we do not use default rolling updates implemented by stateful sets
// that leaves the legacy "OnDelete" update strategy as the only option
updateStrategy := v1beta1.StatefulSetUpdateStrategy{Type: v1beta1.OnDeleteStatefulSetStrategyType}
var podManagementPolicy v1beta1.PodManagementPolicyType
if c.OpConfig.PodManagementPolicy == "ordered_ready" {
podManagementPolicy = v1beta1.OrderedReadyPodManagement
} else if c.OpConfig.PodManagementPolicy == "parallel" {
podManagementPolicy = v1beta1.ParallelPodManagement
} else {
return nil, fmt.Errorf("could not set pod management policy to unknown value %q", c.OpConfig.PodManagementPolicy)
}
statefulSet := &v1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: c.statefulSetName(),
@@ -871,6 +885,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
ServiceName: c.serviceName(Master),
Template: *podTemplate,
VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
UpdateStrategy: updateStrategy,
PodManagementPolicy: podManagementPolicy,
},
}
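
For reference, the same string-to-constant mapping can also be expressed as a switch. The sketch below is a hypothetical helper, not part of this commit; it assumes the same two config values and the `k8s.io/api/apps/v1beta1` types used above.

```go
package cluster

import (
	"fmt"

	"k8s.io/api/apps/v1beta1"
)

// podManagementPolicyFromConfig is a hypothetical helper illustrating the
// mapping performed in generateStatefulSet: it translates the operator
// config string into the corresponding StatefulSet constant and rejects
// anything else.
func podManagementPolicyFromConfig(value string) (v1beta1.PodManagementPolicyType, error) {
	switch value {
	case "ordered_ready":
		return v1beta1.OrderedReadyPodManagement, nil
	case "parallel":
		return v1beta1.ParallelPodManagement, nil
	default:
		return "", fmt.Errorf("unknown pod management policy %q", value)
	}
}
```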

View File

@@ -184,7 +184,7 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
)
if err != nil {
c.logger.Warning("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout)
c.logger.Warningf("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout)
}
}
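
The one-character change above matters because `Warning` (in a logrus-style logger, which `c.logger` appears to be) does not interpret format verbs, so `%q` and `%v` ended up verbatim in the log line; `Warningf` applies the format string. A minimal standalone illustration, with made-up `node` and `timeout` values:

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	node, timeout := "worker-1", "20m" // illustrative values, not operator state

	// Warning treats all arguments as plain values to print: the format verbs
	// stay literal and the extra arguments are simply appended to the message.
	log.Warning("failed to move master pods from the node %q: timeout of %v minutes expired", node, timeout)

	// Warningf interpolates the arguments into the format string as intended.
	log.Warningf("failed to move master pods from the node %q: timeout of %v minutes expired", node, timeout)
}
```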

View File

@@ -109,6 +109,7 @@ type Config struct {
ClusterHistoryEntries int `name:"cluster_history_entries" default:"1000"`
TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"`
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"`
ProtectedRoles []string `name:"protected_role_names" default:"admin"`
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" defaults:"false"`
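
The new `PodManagementPolicy` field follows the same `name`/`default` struct-tag convention as its neighbours, so `ordered_ready` is picked up when the option is not set. As a rough illustration of that convention (a simplified sketch, not the operator's actual config loader), a reflection-based default filler could look like this:

```go
package config

import "reflect"

// fillStringDefaults is a simplified, hypothetical sketch of how `default`
// struct tags can drive configuration defaults: every settable string field
// that is still empty receives the value of its `default` tag.
func fillStringDefaults(cfg interface{}) {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field := v.Field(i)
		def := t.Field(i).Tag.Get("default")
		if field.Kind() == reflect.String && field.CanSet() && field.String() == "" && def != "" {
			field.SetString(def)
		}
	}
}
```

With a loader along these lines, calling `fillStringDefaults(&Config{})` would leave `PodManagementPolicy` at `ordered_ready` unless a value was supplied explicitly.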