add support for global maintenance windows

parent f05150a81e
commit cd1970994e
@@ -101,6 +101,20 @@ spec:
             kubernetes_use_configmaps:
               type: boolean
               default: false
+            maintenance_windows:
+              type: array
+              nullable: true
+              items:
+                type: object
+                properties:
+                  endTime:
+                    type: string
+                  everyday:
+                    type: boolean
+                  startTime:
+                    type: string
+                  weekday:
+                    type: string
             max_instances:
               type: integer
               description: "-1 = disabled"

@@ -49,6 +49,10 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
 
+  # maintenance windows applied to all Postgres clusters unless overridden in the manifest
+  # maintenance_windows:
+  # - "Sun:01:00-06:00"
+
   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit

@@ -116,9 +116,9 @@ These parameters are grouped directly under the `spec` key in the manifest.
 
 * **maintenanceWindows**
   a list which defines specific time frames when certain maintenance operations
-  such as automatic major upgrades or master pod migration. Accepted formats
-  are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific
-  days, with all times in UTC.
+  such as automatic major upgrades or master pod migration are allowed to happen.
+  Accepted formats are "01:00-06:00" for daily maintenance windows or
+  "Sat:00:00-04:00" for specific days, with all times in UTC.
 
 * **users**
   a map of usernames to user flags for the users that should be created in the

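For orientation only (not part of this commit), a small sketch of how the two accepted window formats can be taken apart. The helper name `parseWindow` and its signature are hypothetical; the split on `-` mirrors the `UnmarshalJSON` change further down in this diff.

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// parseWindow is a hypothetical helper illustrating the two accepted formats:
// "01:00-06:00" (every day) and "Sat:00:00-04:00" (one weekday), all in UTC.
func parseWindow(s string) (weekday string, start, end time.Time, err error) {
	parts := strings.Split(s, "-")
	if len(parts) != 2 {
		return "", time.Time{}, time.Time{}, fmt.Errorf("incorrect maintenance window format")
	}
	from := parts[0]
	// A three-letter weekday prefix gives the first part a "Sat:00:00" shape.
	if pieces := strings.SplitN(from, ":", 2); len(pieces) == 2 && len(pieces[0]) == 3 {
		weekday, from = pieces[0], pieces[1]
	}
	if start, err = time.Parse("15:04", from); err != nil {
		return "", time.Time{}, time.Time{}, err
	}
	if end, err = time.Parse("15:04", parts[1]); err != nil {
		return "", time.Time{}, time.Time{}, err
	}
	return weekday, start, end, nil
}

func main() {
	for _, w := range []string{"01:00-06:00", "Sat:00:00-04:00"} {
		day, start, end, err := parseWindow(w)
		fmt.Println(w, "->", day, start.Format("15:04"), end.Format("15:04"), err)
	}
}
```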
@@ -173,6 +173,14 @@ Those are top-level keys, containing both leaf keys and groups.
   the thresholds. The value must be `"true"` to be effective. The default is empty
   which means the feature is disabled.
 
+* **maintenance_windows**
+  a list which defines specific time frames when certain maintenance
+  operations such as automatic major upgrades or master pod migration are
+  allowed to happen for all database clusters. Accepted formats are
+  "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for
+  specific days, with all times in UTC. Locally defined maintenance
+  windows take precedence over globally configured ones.
+
 * **resync_period**
   period between consecutive sync requests. The default is `30m`.
 
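A minimal sketch of the precedence rule stated above, purely for illustration; the helper `effectiveWindows` is hypothetical and not part of this commit.

```go
package main

import "fmt"

// effectiveWindows mirrors the documented precedence: windows defined in the
// cluster manifest win, and the operator-wide list is only consulted when the
// manifest defines none. Hypothetical helper, for illustration only.
func effectiveWindows(clusterWindows, globalWindows []string) []string {
	if len(clusterWindows) > 0 {
		return clusterWindows
	}
	return globalWindows
}

func main() {
	global := []string{"Sun:01:00-06:00"}
	fmt.Println(effectiveWindows(nil, global))                     // falls back to the global window
	fmt.Println(effectiveWindows([]string{"01:00-03:00"}, global)) // manifest-level window wins
}
```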
go.mod
@@ -18,6 +18,7 @@ require (
 	k8s.io/apiextensions-apiserver v0.32.9
 	k8s.io/apimachinery v0.32.9
 	k8s.io/client-go v0.32.9
+	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
@@ -77,7 +78,6 @@ require (
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
 tool (

@@ -99,6 +99,20 @@ spec:
             kubernetes_use_configmaps:
               type: boolean
               default: false
+            maintenance_windows:
+              type: array
+              nullable: true
+              items:
+                type: object
+                properties:
+                  endTime:
+                    type: string
+                  everyday:
+                    type: boolean
+                  startTime:
+                    type: string
+                  weekday:
+                    type: string
             max_instances:
               type: integer
               description: "-1 = disabled"

@@ -127,6 +127,29 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 				"kubernetes_use_configmaps": {
 					Type: "boolean",
 				},
+				"maintenance_windows": {
+					Type:     "array",
+					Nullable: true,
+					Items: &apiextv1.JSONSchemaPropsOrArray{
+						Schema: &apiextv1.JSONSchemaProps{
+							Type: "object",
+							Properties: map[string]apiextv1.JSONSchemaProps{
+								"end_time": {
+									Type: "string",
+								},
+								"everyday": {
+									Type: "boolean",
+								},
+								"start_time": {
+									Type: "string",
+								},
+								"weekday": {
+									Type: "string",
+								},
+							},
+						},
+					},
+				},
 				"max_instances": {
 					Type:        "integer",
 					Description: "-1 = disabled",
@@ -968,7 +991,6 @@ func buildCRD(name, kind, plural, list, short string,
	}
}

//go:embed postgresql.crd.yaml
var postgresqlCRDYAML []byte

// PostgresCRD returns CustomResourceDefinition built from PostgresCRDResource

@@ -31,7 +31,7 @@ func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error {
 		err error
 	)
 
-	parts := strings.Split(string(data[1:len(data)-1]), "-")
+	parts := strings.Split(string(data), "-")
 	if len(parts) != 2 {
 		return fmt.Errorf("incorrect maintenance window format")
 	}

@@ -266,6 +266,7 @@ type OperatorConfigurationData struct {
 	Workers                 uint32              `json:"workers,omitempty"`
 	ResyncPeriod            Duration            `json:"resync_period,omitempty"`
 	RepairPeriod            Duration            `json:"repair_period,omitempty"`
+	MaintenanceWindows      []MaintenanceWindow `json:"maintenance_windows,omitempty"`
 	SetMemoryRequestToLimit bool                `json:"set_memory_request_to_limit,omitempty"`
 	ShmVolume               *bool               `json:"enable_shm_volume,omitempty"`
 	SidecarImages           map[string]string   `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
@@ -285,10 +286,9 @@ type OperatorConfigurationData struct {
 	ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"`
 	Patroni          PatroniConfiguration          `json:"patroni"`
 
-	MinInstances                      int32  `json:"min_instances,omitempty"`
-	MaxInstances                      int32  `json:"max_instances,omitempty"`
-	IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
-
+	MinInstances                       int32  `json:"min_instances,omitempty"`
+	MaxInstances                       int32  `json:"max_instances,omitempty"`
+	IgnoreInstanceLimitsAnnotationKey  string `json:"ignore_instance_limits_annotation_key,omitempty"`
 	IgnoreResourcesLimitsAnnotationKey string `json:"ignore_resources_limits_annotation_key,omitempty"`
 }
 

@@ -433,6 +433,13 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.MaintenanceWindows != nil {
+		in, out := &in.MaintenanceWindows, &out.MaintenanceWindows
+		*out = make([]MaintenanceWindow, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.ShmVolume != nil {
 		in, out := &in.ShmVolume, &out.ShmVolume
 		*out = new(bool)

@@ -989,7 +989,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		return fmt.Errorf("could not set cluster status to updating: %w", err)
 	}
 
-	if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
+	if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
 		// do not apply any major version related changes yet
 		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
 	}

@@ -197,7 +197,7 @@ func (c *Cluster) majorVersionUpgrade() error {
 		return nil
 	}
 
-	if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
+	if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
 		c.logger.Infof("skipping major version upgrade, not in maintenance window")
 		return nil
 	}

@@ -280,7 +280,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
 	}
 
 	scheduleSwitchover := false
-	if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
+	if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
 		c.logger.Infof("postponing switchover, not in maintenance window")
 		scheduleSwitchover = true
 	}

@@ -97,7 +97,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}
 
-	if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
+	if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
 		// do not apply any major version related changes yet
 		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
 	}

@@ -663,15 +663,28 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
 	return resources, nil
 }
 
-func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
-	if len(specMaintenanceWindows) == 0 {
+func (c *Cluster) isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
+	if len(specMaintenanceWindows) == 0 && len(c.OpConfig.MaintenanceWindows) == 0 {
 		return true
 	}
 	now := time.Now()
 	currentDay := now.Weekday()
 	currentTime := now.Format("15:04")
 
-	for _, window := range specMaintenanceWindows {
+	maintenanceWindows := specMaintenanceWindows
+	if len(maintenanceWindows) == 0 {
+		maintenanceWindows = make([]acidv1.MaintenanceWindow, 0, len(c.OpConfig.MaintenanceWindows))
+		for _, windowStr := range c.OpConfig.MaintenanceWindows {
+			var window acidv1.MaintenanceWindow
+			if err := window.UnmarshalJSON([]byte(windowStr)); err != nil {
+				c.logger.Errorf("could not parse default maintenance window %q: %v", windowStr, err)
+				continue
+			}
+			maintenanceWindows = append(maintenanceWindows, window)
+		}
+	}
+
+	for _, window := range maintenanceWindows {
 		startTime := window.StartTime.Format("15:04")
 		endTime := window.EndTime.Format("15:04")
 

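The hunk above cuts off inside the per-window loop. Purely as orientation, a hedged sketch of the kind of check the unchanged remainder of that loop presumably performs, based on the `everyday` and `weekday` fields in the CRD schema; it is not part of this commit. Because the "15:04" strings are fixed-width and zero-padded, plain string comparison of times is safe.

```go
package cluster // placement assumed for illustration; not part of the commit

import "time"

// windowCovers sketches a per-window check: the window applies when it is
// marked as everyday or its weekday equals today, and the current UTC time
// (formatted as "15:04") lies between the window's start and end strings.
func windowCovers(everyday bool, weekday, currentDay time.Weekday,
	startTime, endTime, currentTime string) bool {
	if !everyday && weekday != currentDay {
		return false
	}
	return startTime <= currentTime && currentTime <= endTime
}
```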
@@ -657,6 +657,22 @@ func Test_trimCronjobName(t *testing.T) {
 }
 
 func TestIsInMaintenanceWindow(t *testing.T) {
+	cluster := New(
+		Config{
+			OpConfig: config.Config{
+				Resources: config.Resources{
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+
 	now := time.Now()
 	futureTimeStart := now.Add(1 * time.Hour)
 	futureTimeStartFormatted := futureTimeStart.Format("15:04")
@@ -664,14 +680,16 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 	futureTimeEndFormatted := futureTimeEnd.Format("15:04")
 
 	tests := []struct {
-		name     string
-		windows  []acidv1.MaintenanceWindow
-		expected bool
+		name          string
+		windows       []acidv1.MaintenanceWindow
+		configWindows []string
+		expected      bool
 	}{
 		{
-			name:     "no maintenance windows",
-			windows:  nil,
-			expected: true,
+			name:          "no maintenance windows",
+			windows:       nil,
+			configWindows: nil,
+			expected:      true,
 		},
 		{
 			name: "maintenance windows with everyday",
@@ -682,7 +700,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 					EndTime:   mustParseTime("23:59"),
 				},
 			},
-			expected: true,
+			configWindows: nil,
+			expected:      true,
 		},
 		{
 			name: "maintenance windows with weekday",
@@ -693,7 +712,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 					EndTime:   mustParseTime("23:59"),
 				},
 			},
-			expected: true,
+			configWindows: nil,
+			expected:      true,
 		},
 		{
 			name: "maintenance windows with future interval time",
@@ -706,12 +726,25 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 			},
 			expected: false,
 		},
+		{
+			name:          "global maintenance windows with future interval time",
+			windows:       nil,
+			configWindows: []string{fmt.Sprintf("%s-%s", futureTimeStartFormatted, futureTimeEndFormatted)},
+			expected:      false,
+		},
+		{
+			name:          "global maintenance windows all day",
+			windows:       nil,
+			configWindows: []string{"00:00-23:59"},
+			expected:      true,
+		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			cluster.OpConfig.MaintenanceWindows = tt.configWindows
 			cluster.Spec.MaintenanceWindows = tt.windows
-			if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
+			if cluster.isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
 				t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
 			}
 		})

@@ -51,6 +51,16 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.ShmVolume = util.CoalesceBool(fromCRD.ShmVolume, util.True())
 	result.SidecarImages = fromCRD.SidecarImages
 	result.SidecarContainers = fromCRD.SidecarContainers
+	if len(fromCRD.MaintenanceWindows) > 0 {
+		result.MaintenanceWindows = make([]string, 0, len(fromCRD.MaintenanceWindows))
+		for _, window := range fromCRD.MaintenanceWindows {
+			w, err := window.MarshalJSON()
+			if err != nil {
+				panic(fmt.Errorf("could not marshal configured maintenance window: %v", err))
+			}
+			result.MaintenanceWindows = append(result.MaintenanceWindows, string(w))
+		}
+	}
 
 	// user config
 	result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")

@@ -63,10 +63,9 @@ type Resources struct {
 	NodeReadinessLabelMerge string `name:"node_readiness_label_merge" default:"OR"`
 	ShmVolume               *bool  `name:"enable_shm_volume" default:"true"`
 
-	MaxInstances                      int32  `name:"max_instances" default:"-1"`
-	MinInstances                      int32  `name:"min_instances" default:"-1"`
-	IgnoreInstanceLimitsAnnotationKey string `name:"ignore_instance_limits_annotation_key"`
-
+	MaxInstances                       int32  `name:"max_instances" default:"-1"`
+	MinInstances                       int32  `name:"min_instances" default:"-1"`
+	IgnoreInstanceLimitsAnnotationKey  string `name:"ignore_instance_limits_annotation_key"`
 	IgnoreResourcesLimitsAnnotationKey string `name:"ignore_resources_limits_annotation_key"`
 }
 
@@ -178,6 +177,7 @@ type Config struct {
 	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
 	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
 	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p3"`
+	MaintenanceWindows      []string          `name:"maintenance_windows"`
 	SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
 	SidecarContainers       []v1.Container    `name:"sidecars"`
 	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`