remove TestSyncStatefulSetNonRunningPodsDoNotBlockRecreation

This commit is contained in:
Annie Li 2026-03-06 13:16:44 -08:00
parent c5142ee5ec
commit d47b666870
No known key found for this signature in database
GPG Key ID: 03ABEE8E6189E333
2 changed files with 1 addition and 249 deletions

View File

@ -1,118 +1,3 @@
package cluster
import (
"bytes"
"fmt"
"io"
"net/http"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/zalando/postgres-operator/mocks"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/patroni"
)
// TestGetSwitchoverCandidate verifies that getSwitchoverCandidate picks the
// right member from a mocked Patroni /cluster response: sync standbys win over
// plain replicas in synchronous mode, otherwise the streaming replica with the
// lowest lag (first one on ties), and an error is returned when no eligible
// member exists.
func TestGetSwitchoverCandidate(t *testing.T) {
	testName := "test getting right switchover candidate"
	namespace := "default"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cluster := New(
		Config{
			OpConfig: config.Config{
				PatroniAPICheckInterval: time.Duration(1),
				PatroniAPICheckTimeout:  time.Duration(5),
			},
		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

	// each case simulates a different Patroni cluster member layout
	testCases := []struct {
		subtest           string
		clusterJson       string
		syncModeEnabled   bool
		expectedCandidate spec.NamespacedName
		expectedError     error
	}{
		{
			subtest:           "choose sync_standby over replica",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 0}]}`,
			syncModeEnabled:   true,
			expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
			expectedError:     nil,
		},
		{
			subtest:           "no running sync_standby available",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}]}`,
			syncModeEnabled:   true,
			expectedCandidate: spec.NamespacedName{},
			expectedError:     fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"),
		},
		{
			subtest:           "choose replica with lowest lag",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`,
			syncModeEnabled:   false,
			expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"},
			expectedError:     nil,
		},
		{
			subtest:           "choose first replica when lag is equal everywhere",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`,
			syncModeEnabled:   false,
			expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
			expectedError:     nil,
		},
		{
			subtest:           "no running replica available",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 2}, {"name": "acid-test-cluster-1", "role": "replica", "state": "starting", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 2}]}`,
			syncModeEnabled:   false,
			expectedCandidate: spec.NamespacedName{},
			expectedError:     fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"),
		},
		{
			subtest:           "replicas with different status",
			clusterJson:       `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "in archive recovery", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`,
			syncModeEnabled:   false,
			expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"},
			expectedError:     nil,
		},
	}

	for _, tc := range testCases {
		// fake the Patroni REST endpoint so the candidate search sees tc.clusterJson
		bodyReader := io.NopCloser(bytes.NewReader([]byte(tc.clusterJson)))
		httpMock := mocks.NewMockHTTPClient(ctrl)
		httpMock.EXPECT().Get(gomock.Any()).Return(&http.Response{
			StatusCode: 200,
			Body:       bodyReader,
		}, nil).AnyTimes()
		cluster.patroni = patroni.New(patroniLogger, httpMock)

		masterPod := newMockPod("192.168.100.1")
		masterPod.Namespace = namespace
		cluster.Spec.Patroni.SynchronousMode = tc.syncModeEnabled

		candidate, err := cluster.getSwitchoverCandidate(masterPod)
		if err != nil && err.Error() != tc.expectedError.Error() {
			t.Errorf("%s - %s: unexpected error, %v", testName, tc.subtest, err)
		}
		if candidate != tc.expectedCandidate {
			t.Errorf("%s - %s: unexpect switchover candidate, got %s, expected %s", testName, tc.subtest, candidate, tc.expectedCandidate)
		}
	}
}
func TestPodIsNotRunning(t *testing.T) {
tests := []struct {
subtest string
@ -200,7 +85,7 @@ func TestPodIsNotRunning(t *testing.T) {
expected: true,
},
{
subtest: "pod running with mixed container states - one healthy one broken",
subtest: "pod running with mixed container states",
pod: v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,

View File

@ -1053,136 +1053,3 @@ func TestUpdateSecretNameConflict(t *testing.T) {
expectedError := fmt.Sprintf("syncing secret %s failed: error while checking for password rotation: could not update secret because of user name mismatch", "default/prepared-owner-user.acid-test-cluster.credentials")
assert.Contains(t, err.Error(), expectedError)
}
// TestSyncStatefulSetNonRunningPodsDoNotBlockRecreation checks that
// allPodsRunning treats a pod whose container sits in
// CreateContainerConfigError as not running — for a single broken pod, for a
// healthy pod (true), and for a three-pod set with one broken replica — so a
// rolling update is not blocked waiting on pods that can never start.
func TestSyncStatefulSetNonRunningPodsDoNotBlockRecreation(t *testing.T) {
	testName := "test that non-running pods do not block rolling update"
	client, _ := newFakeK8sSyncClient()
	clusterName := "acid-test-cluster"
	namespace := "default"

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	pg := acidv1.Postgresql{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName,
			Namespace: namespace,
		},
		Spec: acidv1.PostgresSpec{
			NumberOfInstances: 1,
			Volume: acidv1.Volume{
				Size: "1Gi",
			},
		},
	}

	cluster := New(
		Config{
			OpConfig: config.Config{
				PatroniAPICheckInterval: time.Duration(1),
				PatroniAPICheckTimeout:  time.Duration(5),
				PodManagementPolicy:     "ordered_ready",
				Resources: config.Resources{
					ClusterLabels:         map[string]string{"application": "spilo"},
					ClusterNameLabel:      "cluster-name",
					DefaultCPURequest:     "300m",
					DefaultCPULimit:       "300m",
					DefaultMemoryRequest:  "300Mi",
					DefaultMemoryLimit:    "300Mi",
					PodRoleLabel:          "spilo-role",
					ResourceCheckInterval: time.Duration(3),
					ResourceCheckTimeout:  time.Duration(10),
				},
			},
		}, client, pg, logger, eventRecorder)
	cluster.Name = clusterName
	cluster.Namespace = namespace

	// Patroni API that always fails, simulating an unreachable pod
	httpMock := mocks.NewMockHTTPClient(ctrl)
	httpMock.EXPECT().Do(gomock.Any()).Return(nil, fmt.Errorf("connection refused")).AnyTimes()
	httpMock.EXPECT().Get(gomock.Any()).Return(nil, fmt.Errorf("connection refused")).AnyTimes()
	cluster.patroni = patroni.New(patroniLogger, httpMock)

	// builds a PodRunning pod whose single container is in the given state
	podWithContainerState := func(state v1.ContainerState) v1.Pod {
		return v1.Pod{
			Status: v1.PodStatus{
				Phase:             v1.PodRunning,
				ContainerStatuses: []v1.ContainerStatus{{State: state}},
			},
		}
	}
	healthy := v1.ContainerState{Running: &v1.ContainerStateRunning{}}
	brokenConfig := v1.ContainerState{
		Waiting: &v1.ContainerStateWaiting{
			Reason:  "CreateContainerConfigError",
			Message: `secret "old-secret" not found`,
		},
	}

	// a pod whose container cannot be created must not count as running
	if cluster.allPodsRunning([]v1.Pod{podWithContainerState(brokenConfig)}) {
		t.Errorf("%s: allPodsRunning should return false for pods in CreateContainerConfigError", testName)
	}

	// a pod with a running container does count
	if !cluster.allPodsRunning([]v1.Pod{podWithContainerState(healthy)}) {
		t.Errorf("%s: allPodsRunning should return true for running pods", testName)
	}

	// 3-node cluster with one broken replica: not all running
	mixedPods := []v1.Pod{
		podWithContainerState(healthy),
		podWithContainerState(healthy),
		podWithContainerState(v1.ContainerState{
			Waiting: &v1.ContainerStateWaiting{
				Reason: "CreateContainerConfigError",
			},
		}),
	}
	if cluster.allPodsRunning(mixedPods) {
		t.Errorf("%s: allPodsRunning should return false when one pod is in CreateContainerConfigError", testName)
	}
}