diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 95ce6b091..290d4ff92 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -6,6 +6,7 @@ import ( "fmt" "reflect" "regexp" + "strconv" "strings" "time" @@ -425,7 +426,7 @@ func (c *Cluster) syncStatefulSet() error { } } - // restart instances if requiredy + // restart instances if required remainingPods := make([]*v1.Pod, 0) skipRole := Master if restartMasterFirst { @@ -572,11 +573,18 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, patroniC effectiveValue := effectivePgParameters[desiredOption] if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) { parametersToSet[desiredOption] = desiredValue - if util.SliceContains(requireMasterRestartWhenDecreased, desiredOption) && (effectiveValue > desiredValue) { - restartMaster = append(restartMaster, true) - } else { - restartMaster = append(restartMaster, false) + if util.SliceContains(requireMasterRestartWhenDecreased, desiredOption) { + effectiveValueNum, errConv := strconv.Atoi(effectiveValue) + desiredValueNum, errConv2 := strconv.Atoi(desiredValue) + if errConv != nil || errConv2 != nil { + continue + } + if effectiveValueNum > desiredValueNum { + restartMaster = append(restartMaster, true) + continue + } } + restartMaster = append(restartMaster, false) } } diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index e6f23914b..c6e2a8357 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -1,22 +1,40 @@ package cluster import ( + "bytes" + "io/ioutil" + "net/http" "testing" "time" "context" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "github.com/golang/mock/gomock" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/zalando/postgres-operator/mocks" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" fakeacidv1 
"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/patroni" "k8s.io/client-go/kubernetes/fake" ) +var patroniLogger = logrus.New().WithField("test", "patroni") + +func newMockPod(ip string) *v1.Pod { + return &v1.Pod{ + Status: v1.PodStatus{ + PodIP: ip, + }, + } +} + func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) { acidClientSet := fakeacidv1.NewSimpleClientset() clientSet := fake.NewSimpleClientset() @@ -113,3 +131,134 @@ func TestSyncStatefulSetsAnnotations(t *testing.T) { t.Errorf("%s: inherited annotation not found in desired statefulset: %#v", testName, desiredSts.Annotations) } } + +func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) { + testName := "test config comparison" + client, _ := newFakeK8sSyncClient() + clusterName := "acid-test-cluster" + namespace := "default" + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Patroni: acidv1.Patroni{ + TTL: 20, + }, + PostgresqlParam: acidv1.PostgresqlParam{ + Parameters: map[string]string{ + "log_min_duration_statement": "200", + "max_connections": "50", + }, + }, + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + ResourceCheckInterval: time.Duration(3), + ResourceCheckTimeout: time.Duration(10), + }, + }, + }, client, pg, logger, eventRecorder) + + // mocking a config 
after setConfig is called + configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}, "ttl": 20}` + r := ioutil.NopCloser(bytes.NewReader([]byte(configJson))) + + response := http.Response{ + StatusCode: 200, + Body: r, + } + + mockClient := mocks.NewMockHTTPClient(ctrl) + mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes() + + p := patroni.New(patroniLogger, mockClient) + cluster.patroni = p + mockPod := newMockPod("192.168.100.1") + + // simulate existing config that differs with cluster.Spec + tests := []struct { + subtest string + pod *v1.Pod + patroni acidv1.Patroni + pgParams map[string]string + restartMaster bool + }{ + { + subtest: "Patroni and Postgresql.Parameters differ - restart replica first", + pod: mockPod, + patroni: acidv1.Patroni{ + TTL: 30, // desired 20 + }, + pgParams: map[string]string{ + "log_min_duration_statement": "500", // desired 200 + "max_connections": "100", // desired 50 + }, + restartMaster: false, + }, + { + subtest: "multiple Postgresql.Parameters differ - restart replica first", + pod: mockPod, + patroni: acidv1.Patroni{ + TTL: 20, + }, + pgParams: map[string]string{ + "log_min_duration_statement": "500", // desired 200 + "max_connections": "100", // desired 50 + }, + restartMaster: false, + }, + { + subtest: "desired max_connections bigger - restart replica first", + pod: mockPod, + patroni: acidv1.Patroni{ + TTL: 20, + }, + pgParams: map[string]string{ + "log_min_duration_statement": "200", + "max_connections": "30", // desired 50 + }, + restartMaster: false, + }, + { + subtest: "desired max_connections smaller - restart master first", + pod: mockPod, + patroni: acidv1.Patroni{ + TTL: 20, + }, + pgParams: map[string]string{ + "log_min_duration_statement": "200", + "max_connections": "100", // desired 50 + }, + restartMaster: true, + }, + } + + for _, tt := range tests { + requireMasterRestart, err := cluster.checkAndSetGlobalPostgreSQLConfiguration(tt.pod,
tt.patroni, tt.pgParams) + assert.NoError(t, err) + if requireMasterRestart != tt.restartMaster { + t.Errorf("%s - %s: unexpected master restart strategy, got %v, expected %v", testName, tt.subtest, requireMasterRestart, tt.restartMaster) + } + } +}