Merge branch 'master' into add-ignore-slots-dcs-config

This commit is contained in:
Zavel Paytsev 2026-02-03 12:21:37 -08:00 committed by GitHub
commit ee47030783
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
25 changed files with 219 additions and 97 deletions

View File

@@ -13,7 +13,7 @@ LDFLAGS ?= -X=main.version=$(VERSION)
DOCKERDIR = docker
BASE_IMAGE ?= alpine:latest
IMAGE ?= $(BINARY)
IMAGE ?= ghcr.io/zalando/$(BINARY)
TAG ?= $(VERSION)
GITHEAD = $(shell git rev-parse --short HEAD)
GITURL = $(shell git config --get remote.origin.url)
@@ -84,7 +84,7 @@ linux: ${SOURCES} $(GENERATED_CRDS)
macos: ${SOURCES} $(GENERATED_CRDS)
GOOS=darwin GOARCH=amd64 CGO_ENABLED=${CGO_ENABLED} go build -o build/macos/${BINARY} ${BUILD_FLAGS} -ldflags "$(LDFLAGS)" $(SOURCES)
docker: ${DOCKERDIR}/${DOCKERFILE}
docker: $(GENERATED_CRDS) ${DOCKERDIR}/${DOCKERFILE}
echo `(env)`
echo "Tag ${TAG}"
echo "Version ${VERSION}"

View File

@@ -101,6 +101,11 @@ spec:
kubernetes_use_configmaps:
type: boolean
default: false
maintenance_windows:
items:
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
type: string
type: array
max_instances:
type: integer
description: "-1 = disabled"

View File

@@ -49,6 +49,10 @@ configGeneral:
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
# kubernetes_use_configmaps: false
# maintenance windows applied to all Postgres clusters unless overridden in the manifest
# maintenance_windows:
# - "Sun:01:00-06:00"
# min number of instances in Postgres cluster. -1 = no limit
min_instances: -1
# max number of instances in Postgres cluster. -1 = no limit

View File

@@ -116,9 +116,9 @@ These parameters are grouped directly under the `spec` key in the manifest.
* **maintenanceWindows**
a list which defines specific time frames when certain maintenance operations
such as automatic major upgrades or master pod migration. Accepted formats
are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific
days, with all times in UTC.
such as automatic major upgrades or master pod migration are allowed to happen.
Accepted formats are "01:00-06:00" for daily maintenance windows or
"Sat:00:00-04:00" for specific days, with all times in UTC.
* **users**
a map of usernames to user flags for the users that should be created in the

View File

@@ -173,6 +173,14 @@ Those are top-level keys, containing both leaf keys and groups.
the thresholds. The value must be `"true"` to be effective. The default is empty
which means the feature is disabled.
* **maintenance_windows**
a list which defines specific time frames when certain maintenance
operations such as automatic major upgrades or master pod migration are
allowed to happen for all database clusters. Accepted formats are
"01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for
specific days, with all times in UTC. Locally defined maintenance
windows take precedence over globally configured ones.
* **resync_period**
period between consecutive sync requests. The default is `30m`.

View File

@@ -7,7 +7,7 @@ set -o pipefail
IFS=$'\n\t'
readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly kubeconfig_path="${HOME}/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3"
readonly e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest"
@@ -19,11 +19,17 @@ echo "Kubeconfig path: ${kubeconfig_path}"
function pull_images(){
operator_tag=$(git describe --tags --always --dirty)
if [[ -z $(docker images -q ghcr.io/zalando/postgres-operator:${operator_tag}) ]]
image_name="ghcr.io/zalando/postgres-operator:${operator_tag}"
if [[ -z $(docker images -q "${image_name}") ]]
then
docker pull ghcr.io/zalando/postgres-operator:latest
if ! docker pull "${image_name}"
then
echo "Failed to pull operator image: ${image_name}"
exit 1
fi
fi
operator_image=$(docker images --filter=reference="ghcr.io/zalando/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
operator_image="${image_name}"
echo "Using operator image: ${operator_image}"
}
function start_kind(){
@@ -52,7 +58,7 @@ function set_kind_api_server_ip(){
# but update the IP address of the API server to the one from the Docker 'bridge' network
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
sed "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" > "${kubeconfig_path}".tmp && mv "${kubeconfig_path}".tmp "${kubeconfig_path}"
}
function generate_certificate(){

2
go.mod
View File

@@ -18,6 +18,7 @@ require (
k8s.io/apiextensions-apiserver v0.32.9
k8s.io/apimachinery v0.32.9
k8s.io/client-go v0.32.9
sigs.k8s.io/yaml v1.4.0
)
require (
@@ -77,7 +78,6 @@ require (
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
tool (

View File

@@ -102,6 +102,7 @@ data:
logical_backup_s3_sse: "AES256"
logical_backup_s3_retention_time: ""
logical_backup_schedule: "30 00 * * *"
# maintenance_windows: "Sat:22:00-23:59,Sun:00:00-01:00"
major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list: ""
master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"

View File

@@ -99,6 +99,11 @@ spec:
kubernetes_use_configmaps:
type: boolean
default: false
maintenance_windows:
items:
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
type: string
type: array
max_instances:
type: integer
description: "-1 = disabled"

View File

@@ -16,6 +16,9 @@ configuration:
# ignore_instance_limits_annotation_key: ""
# ignore_resources_limits_annotation_key: ""
# kubernetes_use_configmaps: false
# maintenance_windows:
# - "Sat:22:00-23:59"
# - "Sun:00:00-01:00"
max_instances: -1
min_instances: -1
resync_period: 30m

View File

@@ -127,6 +127,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"kubernetes_use_configmaps": {
Type: "boolean",
},
"maintenance_windows": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
Pattern: "^\\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\\d):([0-5]?\\d)|(2[0-3]|[01]?\\d):([0-5]?\\d))\\ *$",
},
},
},
"max_instances": {
Type: "integer",
Description: "-1 = disabled",

View File

@@ -31,7 +31,8 @@ func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error {
err error
)
parts := strings.Split(string(data[1:len(data)-1]), "-")
dataStr := strings.Trim(string(data), "\"")
parts := strings.Split(dataStr, "-")
if len(parts) != 2 {
return fmt.Errorf("incorrect maintenance window format")
}

View File

@@ -266,6 +266,7 @@ type OperatorConfigurationData struct {
Workers uint32 `json:"workers,omitempty"`
ResyncPeriod Duration `json:"resync_period,omitempty"`
RepairPeriod Duration `json:"repair_period,omitempty"`
MaintenanceWindows []MaintenanceWindow `json:"maintenance_windows,omitempty"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
ShmVolume *bool `json:"enable_shm_volume,omitempty"`
SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
@@ -285,10 +286,9 @@ type OperatorConfigurationData struct {
ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"`
Patroni PatroniConfiguration `json:"patroni"`
MinInstances int32 `json:"min_instances,omitempty"`
MaxInstances int32 `json:"max_instances,omitempty"`
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
MinInstances int32 `json:"min_instances,omitempty"`
MaxInstances int32 `json:"max_instances,omitempty"`
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
IgnoreResourcesLimitsAnnotationKey string `json:"ignore_resources_limits_annotation_key,omitempty"`
}

View File

@@ -91,6 +91,13 @@ var maintenanceWindows = []struct {
StartTime: mustParseTime("10:00"),
EndTime: mustParseTime("20:00"),
}, nil},
{"regular every day scenario",
[]byte(`"05:00-07:00"`),
MaintenanceWindow{
Everyday: true,
StartTime: mustParseTime("05:00"),
EndTime: mustParseTime("07:00"),
}, nil},
{"starts and ends at the same time",
[]byte(`"Mon:10:00-10:00"`),
MaintenanceWindow{

View File

@@ -433,6 +433,13 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MaintenanceWindows != nil {
in, out := &in.MaintenanceWindows, &out.MaintenanceWindows
*out = make([]MaintenanceWindow, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ShmVolume != nil {
in, out := &in.ShmVolume, &out.ShmVolume
*out = new(bool)

View File

@@ -32,6 +32,7 @@ import (
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
@@ -271,26 +272,29 @@ func (c *Cluster) Create() (err error) {
)
defer func() {
var (
pgUpdatedStatus *acidv1.Postgresql
errStatus error
)
if err == nil {
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
} else {
currentStatus := c.Status.DeepCopy()
pg := c.Postgresql.DeepCopy()
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
if err != nil {
c.logger.Warningf("cluster created failed: %v", err)
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed)
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
}
if errStatus != nil {
c.logger.Warningf("could not set cluster status: %v", errStatus)
return
}
if pgUpdatedStatus != nil {
if !equality.Semantic.DeepEqual(currentStatus, pg.Status) {
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
if err != nil {
c.logger.Warningf("could not set cluster status: %v", err)
return
}
c.setSpec(pgUpdatedStatus)
}
}()
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
pg := c.Postgresql.DeepCopy()
pg.Status.PostgresClusterStatus = acidv1.ClusterStatusCreating
pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), pg)
if err != nil {
return fmt.Errorf("could not set cluster status: %v", err)
}
@@ -978,29 +982,33 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
c.mu.Lock()
defer c.mu.Unlock()
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdating
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
newSpec, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
if err != nil {
return fmt.Errorf("could not set cluster status to updating: %w", err)
}
if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
// do not apply any major version related changes yet
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}
c.setSpec(newSpec)
defer func() {
var (
pgUpdatedStatus *acidv1.Postgresql
err error
)
currentStatus := newSpec.Status.DeepCopy()
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
if updateFailed {
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed)
} else {
pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
}
if err != nil {
c.logger.Warningf("could not set cluster status: %v", err)
return
}
if pgUpdatedStatus != nil {
if !equality.Semantic.DeepEqual(currentStatus, newSpec.Status) {
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
if err != nil {
c.logger.Warningf("could not set cluster status: %v", err)
return
}
c.setSpec(pgUpdatedStatus)
}
}()

View File

@@ -197,7 +197,7 @@ func (c *Cluster) majorVersionUpgrade() error {
return nil
}
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
c.logger.Infof("skipping major version upgrade, not in maintenance window")
return nil
}

View File

@@ -280,7 +280,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
}
scheduleSwitchover := false
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
if !c.isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
c.logger.Infof("postponing switchover, not in maintenance window")
scheduleSwitchover = true
}

View File

@@ -20,6 +20,7 @@ import (
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
@@ -43,21 +44,19 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
c.setSpec(newSpec)
defer func() {
var (
pgUpdatedStatus *acidv1.Postgresql
errStatus error
)
if err != nil {
c.logger.Warningf("error while syncing cluster state: %v", err)
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed)
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
} else if !c.Status.Running() {
pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning)
newSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusRunning
}
if errStatus != nil {
c.logger.Warningf("could not set cluster status: %v", errStatus)
return
}
if pgUpdatedStatus != nil {
if !equality.Semantic.DeepEqual(oldSpec.Status, newSpec.Status) {
pgUpdatedStatus, err := c.KubeClient.SetPostgresCRDStatus(c.clusterName(), newSpec)
if err != nil {
c.logger.Warningf("could not set cluster status: %v", err)
return
}
c.setSpec(pgUpdatedStatus)
}
}()
@@ -98,7 +97,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
}
}
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
if !c.isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
// do not apply any major version related changes yet
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}

View File

@@ -663,15 +663,28 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
return resources, nil
}
func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
if len(specMaintenanceWindows) == 0 {
func (c *Cluster) isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
if len(specMaintenanceWindows) == 0 && len(c.OpConfig.MaintenanceWindows) == 0 {
return true
}
now := time.Now()
currentDay := now.Weekday()
currentTime := now.Format("15:04")
for _, window := range specMaintenanceWindows {
maintenanceWindows := specMaintenanceWindows
if len(maintenanceWindows) == 0 {
maintenanceWindows = make([]acidv1.MaintenanceWindow, 0, len(c.OpConfig.MaintenanceWindows))
for _, windowStr := range c.OpConfig.MaintenanceWindows {
var window acidv1.MaintenanceWindow
if err := window.UnmarshalJSON([]byte(windowStr)); err != nil {
c.logger.Errorf("could not parse default maintenance window %q: %v", windowStr, err)
continue
}
maintenanceWindows = append(maintenanceWindows, window)
}
}
for _, window := range maintenanceWindows {
startTime := window.StartTime.Format("15:04")
endTime := window.EndTime.Format("15:04")

View File

@@ -288,6 +288,12 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
},
}
// add postgresql cluster to fake client
_, err := client.PostgresqlsGetter.Postgresqls(namespace).Create(context.TODO(), &pg, metav1.CreateOptions{})
if err != nil {
return nil, err
}
cluster := New(
Config{
OpConfig: config.Config{
@ -321,7 +327,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
}, client, pg, logger, eventRecorder)
cluster.Name = clusterName
cluster.Namespace = namespace
_, err := cluster.createStatefulSet()
_, err = cluster.createStatefulSet()
if err != nil {
return nil, err
}
@ -651,6 +657,22 @@ func Test_trimCronjobName(t *testing.T) {
}
func TestIsInMaintenanceWindow(t *testing.T) {
cluster := New(
Config{
OpConfig: config.Config{
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
cluster.Name = clusterName
cluster.Namespace = namespace
now := time.Now()
futureTimeStart := now.Add(1 * time.Hour)
futureTimeStartFormatted := futureTimeStart.Format("15:04")
@ -658,14 +680,16 @@ func TestIsInMaintenanceWindow(t *testing.T) {
futureTimeEndFormatted := futureTimeEnd.Format("15:04")
tests := []struct {
name string
windows []acidv1.MaintenanceWindow
expected bool
name string
windows []acidv1.MaintenanceWindow
configWindows []string
expected bool
}{
{
name: "no maintenance windows",
windows: nil,
expected: true,
name: "no maintenance windows",
windows: nil,
configWindows: nil,
expected: true,
},
{
name: "maintenance windows with everyday",
@ -676,7 +700,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
EndTime: mustParseTime("23:59"),
},
},
expected: true,
configWindows: nil,
expected: true,
},
{
name: "maintenance windows with weekday",
@ -687,7 +712,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
EndTime: mustParseTime("23:59"),
},
},
expected: true,
configWindows: nil,
expected: true,
},
{
name: "maintenance windows with future interval time",
@ -700,12 +726,25 @@ func TestIsInMaintenanceWindow(t *testing.T) {
},
expected: false,
},
{
name: "global maintenance windows with future interval time",
windows: nil,
configWindows: []string{fmt.Sprintf("%s-%s", futureTimeStartFormatted, futureTimeEndFormatted)},
expected: false,
},
{
name: "global maintenance windows all day",
windows: nil,
configWindows: []string{"00:00-02:00", "02:00-23:59"},
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cluster.OpConfig.MaintenanceWindows = tt.configWindows
cluster.Spec.MaintenanceWindows = tt.windows
if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
if cluster.isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
}
})

View File

@@ -51,6 +51,16 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.ShmVolume = util.CoalesceBool(fromCRD.ShmVolume, util.True())
result.SidecarImages = fromCRD.SidecarImages
result.SidecarContainers = fromCRD.SidecarContainers
if len(fromCRD.MaintenanceWindows) > 0 {
result.MaintenanceWindows = make([]string, 0, len(fromCRD.MaintenanceWindows))
for _, window := range fromCRD.MaintenanceWindows {
w, err := window.MarshalJSON()
if err != nil {
panic(fmt.Errorf("could not marshal configured maintenance window: %v", err))
}
result.MaintenanceWindows = append(result.MaintenanceWindows, string(w))
}
}
// user config
result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")

View File

@@ -161,7 +161,8 @@ func (c *Controller) acquireInitialListOfClusters() error {
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
if c.opConfig.EnableTeamIdClusternamePrefix {
if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid)
pgSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusInvalid
c.KubeClient.SetPostgresCRDStatus(clusterName, pgSpec)
return nil, err
}
}
@@ -470,13 +471,25 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
switch eventType {
case EventAdd:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusAddFailed)
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusAddFailed
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
if err != nil {
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
}
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Create", "%v", clusterError)
case EventUpdate:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusUpdateFailed)
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusUpdateFailed
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
if err != nil {
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
}
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Update", "%v", clusterError)
default:
c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusSyncFailed)
informerNewSpec.Status.PostgresClusterStatus = acidv1.ClusterStatusSyncFailed
_, err := c.KubeClient.SetPostgresCRDStatus(clusterName, informerNewSpec)
if err != nil {
c.logger.WithField("cluster-name", clusterName).Errorf("could not set PostgresCRD status: %v", err)
}
c.eventRecorder.Eventf(c.GetReference(informerNewSpec), v1.EventTypeWarning, "Sync", "%v", clusterError)
}

View File

@@ -63,10 +63,9 @@ type Resources struct {
NodeReadinessLabelMerge string `name:"node_readiness_label_merge" default:"OR"`
ShmVolume *bool `name:"enable_shm_volume" default:"true"`
MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"`
IgnoreInstanceLimitsAnnotationKey string `name:"ignore_instance_limits_annotation_key"`
MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"`
IgnoreInstanceLimitsAnnotationKey string `name:"ignore_instance_limits_annotation_key"`
IgnoreResourcesLimitsAnnotationKey string `name:"ignore_resources_limits_annotation_key"`
}
@@ -178,6 +177,7 @@ type Config struct {
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p3"`
MaintenanceWindows []string `name:"maintenance_windows"`
SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
SidecarContainers []v1.Container `name:"sidecars"`
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`

View File

@@ -191,24 +191,8 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
}
// SetPostgresCRDStatus of Postgres cluster
func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) {
var pg *apiacidv1.Postgresql
var pgStatus apiacidv1.PostgresStatus
pgStatus.PostgresClusterStatus = status
patch, err := json.Marshal(struct {
PgStatus interface{} `json:"status"`
}{&pgStatus})
if err != nil {
return pg, fmt.Errorf("could not marshal status: %v", err)
}
// we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
// however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11)
// we should take advantage of it.
pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch(
context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, pg *apiacidv1.Postgresql) (*apiacidv1.Postgresql, error) {
pg, err := client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).UpdateStatus(context.TODO(), pg, metav1.UpdateOptions{})
if err != nil {
return pg, fmt.Errorf("could not update status: %v", err)
}