Improvements in tests

- Fixed the failing test cases
- Added more test cases for the replica connection pooler
- Added documentation for the new `enableReplicaConnectionPooler` flag
Rafia Sabih 2020-09-08 17:32:47 +02:00
parent b3dbac5b81
commit 770fc1e612
5 changed files with 69 additions and 63 deletions


@@ -141,10 +141,15 @@ These parameters are grouped directly under the `spec` key in the manifest.
   configured (so you can override the operator configuration). Optional.
 
 * **enableConnectionPooler**
-  Tells the operator to create a connection pooler with a database. If this
-  field is true, a connection pooler deployment will be created even if
+  Tells the operator to create a connection pooler with a database for the master
+  service. If this field is true, a connection pooler deployment will be created even if
   `connectionPooler` section is empty. Optional, not set by default.
 
+* **enableReplicaConnectionPooler**
+  Tells the operator to create a connection pooler with a database for the replica
+  service. If this field is true, a connection pooler deployment for the replica
+  will be created even if the `connectionPooler` section is empty. Optional, not set by default.
+
 * **enableLogicalBackup**
   Determines if the logical backup of this cluster should be taken and uploaded
   to S3. Default: false. Optional.
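
Both flags sit directly under `spec` and can be enabled independently. A minimal manifest sketch; the `apiVersion`, `kind`, cluster name, and `teamId` boilerplate are illustrative context, not part of this change:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster            # illustrative cluster name
spec:
  teamId: "acid"                        # illustrative team
  enableConnectionPooler: true          # pooler deployment and service for the master
  enableReplicaConnectionPooler: true   # separate pooler deployment and service for the replica
```

Unsetting either flag later removes only that role's pooler deployment and service, which is the cleanup path the `syncConnectionPooler` changes below implement.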


@@ -737,11 +737,17 @@ manifest:
 
 ```yaml
 spec:
   enableConnectionPooler: true
+  enableReplicaConnectionPooler: true
 ```
 
 This will tell the operator to create a connection pooler with default
 configuration, through which one can access the master via a separate service
-`{cluster-name}-pooler`. In most of the cases the
+`{cluster-name}-pooler`. With the first option, a connection pooler for the master
+service is created; with the second, one for the replica service.
+Note that the two flags are independent of each other: users can set or
+unset either of them as required without any effect on the other.
+In most cases the
 [default configuration](reference/operator_parameters.md#connection-pooler-configuration)
 should be good enough. To configure a new connection pooler individually for
 each Postgres cluster, specify:
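
For that per-cluster tuning, a sketch of such a `connectionPooler` section, assuming the field names from the operator's `ConnectionPooler` CRD spec (`numberOfInstances`, `mode`, `resources`); the values are illustrative:

```yaml
spec:
  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"   # pgbouncer pooling mode
    resources:
      requests:
        cpu: 500m
        memory: 100Mi
      limits:
        cpu: "1"
        memory: 100Mi
```

One `connectionPooler` section configures both poolers; the two `enable*` flags only control whether the master and replica deployments are created.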


@@ -929,7 +929,7 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {
 }
 
-func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
 	cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
 	if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
 		return fmt.Errorf("CPU request doesn't match, got %s, expected %s",
@@ -957,18 +957,18 @@ func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
 	return nil
 }
 
-func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
 	poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"]
 
-	if poolerLabels != cluster.connectionPoolerLabelsSelector(Master).MatchLabels["connection-pooler"] {
+	if poolerLabels != cluster.connectionPoolerLabelsSelector(role).MatchLabels["connection-pooler"] {
 		return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
-			podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(Master).MatchLabels)
+			podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector(role).MatchLabels)
 	}
 
 	return nil
 }
 
-func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
 	required := map[string]bool{
 		"PGHOST": false,
 		"PGPORT": false,
@@ -1034,14 +1034,14 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
 
-	noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil }
+	noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil }
 
 	tests := []struct {
 		subTest  string
 		spec     *acidv1.PostgresSpec
 		expected error
 		cluster  *Cluster
-		check    func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
+		check    func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error
 	}{
 		{
 			subTest: "default configuration",
@@ -1073,7 +1073,8 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 		{
 			subTest: "labels for service",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
+				ConnectionPooler:              &acidv1.ConnectionPooler{},
+				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
 			expected: nil,
 			cluster:  cluster,
@@ -1089,20 +1090,23 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 			check: testEnvs,
 		},
 	}
 
-	for _, tt := range tests {
-		podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, Master)
+	for _, role := range [2]PostgresRole{Master, Replica} {
+		for _, tt := range tests {
+			podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec, role)
 
-		if err != tt.expected && err.Error() != tt.expected.Error() {
-			t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
-				testName, tt.subTest, err, tt.expected)
-		}
+			if err != tt.expected && err.Error() != tt.expected.Error() {
+				t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
+					testName, tt.subTest, err, tt.expected)
+			}
 
-		err = tt.check(cluster, podSpec)
-		if err != nil {
-			t.Errorf("%s [%s]: Pod spec is incorrect, %+v",
-				testName, tt.subTest, err)
+			err = tt.check(cluster, podSpec, role)
+			if err != nil {
+				t.Errorf("%s [%s]: Pod spec is incorrect, %+v",
+					testName, tt.subTest, err)
+			}
 		}
 	}
 }
 
 func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
@@ -1166,7 +1170,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
 		{
 			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
+				ConnectionPooler:              &acidv1.ConnectionPooler{},
+				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
 			expected: nil,
 			cluster:  cluster,
@@ -1175,7 +1180,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
 		{
 			subTest: "owner reference",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
+				ConnectionPooler:              &acidv1.ConnectionPooler{},
+				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
 			expected: nil,
 			cluster:  cluster,
@@ -1184,7 +1190,8 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
 		{
 			subTest: "selector",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
+				ConnectionPooler:              &acidv1.ConnectionPooler{},
+				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
 			expected: nil,
 			cluster:  cluster,
@@ -1205,9 +1212,10 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
 				testName, tt.subTest, err)
 		}
 	}
 }
 
-func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error {
+func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
 	owner := service.ObjectMeta.OwnerReferences[0]
 
 	if owner.Name != cluster.Statefulset.ObjectMeta.Name {
@@ -1218,12 +1226,12 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error {
 	return nil
 }
 
-func testServiceSelector(cluster *Cluster, service *v1.Service) error {
+func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRole) error {
 	selector := service.Spec.Selector
 
-	if selector["connection-pooler"] != cluster.connectionPoolerName(Master) {
+	if selector["connection-pooler"] != cluster.connectionPoolerName(role) {
 		return fmt.Errorf("Selector is incorrect, got %s, expected %s",
-			selector["connection-pooler"], cluster.connectionPoolerName(Master))
+			selector["connection-pooler"], cluster.connectionPoolerName(role))
 	}
 
 	return nil
@@ -1253,7 +1261,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
 		},
 	}
 
-	noCheck := func(cluster *Cluster, deployment *v1.Service) error {
+	noCheck := func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error {
 		return nil
 	}
@@ -1261,7 +1269,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
 		subTest string
 		spec    *acidv1.PostgresSpec
 		cluster *Cluster
-		check   func(cluster *Cluster, deployment *v1.Service) error
+		check   func(cluster *Cluster, deployment *v1.Service, role PostgresRole) error
 	}{
 		{
 			subTest: "default configuration",
@@ -1282,18 +1290,21 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
 		{
 			subTest: "selector",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
+				ConnectionPooler:              &acidv1.ConnectionPooler{},
+				EnableReplicaConnectionPooler: boolToPointer(true),
 			},
 			cluster: cluster,
 			check:   testServiceSelector,
 		},
 	}
 
-	for _, tt := range tests {
-		service := tt.cluster.generateConnectionPoolerService(tt.spec, Master)
+	for _, role := range [2]PostgresRole{Master, Replica} {
+		for _, tt := range tests {
+			service := tt.cluster.generateConnectionPoolerService(tt.spec, role)
 
-		if err := tt.check(cluster, service); err != nil {
-			t.Errorf("%s [%s]: Service spec is incorrect, %+v",
-				testName, tt.subTest, err)
+			if err := tt.check(cluster, service, role); err != nil {
+				t.Errorf("%s [%s]: Service spec is incorrect, %+v",
+					testName, tt.subTest, err)
+			}
 		}
 	}
 }


@@ -847,10 +847,7 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
 	var err error
 	var newNeedConnectionPooler, oldNeedConnectionPooler bool
 
-	if c.ConnectionPooler == nil {
-		c.ConnectionPooler = &ConnectionPoolerObjects{}
-	}
-
 	// Check and perform the sync requirements for each of the roles.
 	for _, role := range [2]PostgresRole{Master, Replica} {
 		if role == Master {
@@ -859,6 +856,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
 			newNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&newSpec.Spec)
 			oldNeedConnectionPooler = c.needReplicaConnectionPoolerWorker(&oldSpec.Spec)
 		}
 
+		if c.ConnectionPooler == nil {
+			c.ConnectionPooler = &ConnectionPoolerObjects{}
+		}
+
 		if newNeedConnectionPooler {
 			// Try to sync in any case. If we didn't need a connection pooler before,
@@ -902,27 +902,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
 		if oldNeedConnectionPooler && !newNeedConnectionPooler {
 			// delete and cleanup resources
-			if role == Master {
-				if c.ConnectionPooler != nil &&
-					(c.ConnectionPooler.Deployment != nil ||
-						c.ConnectionPooler.Service != nil) {
-					if err = c.deleteConnectionPooler(role); err != nil {
-						c.logger.Warningf("could not remove connection pooler: %v", err)
-					}
-				}
-			} else {
-				if c.ConnectionPooler != nil &&
-					(c.ConnectionPooler.ReplDeployment != nil ||
-						c.ConnectionPooler.ReplService != nil) {
-					if err = c.deleteConnectionPooler(role); err != nil {
-						c.logger.Warningf("could not remove connection pooler: %v", err)
-					}
-				}
+			if err = c.deleteConnectionPooler(role); err != nil {
+				c.logger.Warningf("could not remove connection pooler: %v", err)
 			}
 		}
@@ -937,6 +918,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
 					c.logger.Warningf("could not remove connection pooler: %v", err)
 				}
+			} else if c.ConnectionPooler.ReplDeployment == nil && c.ConnectionPooler.ReplService == nil {
+				c.ConnectionPooler = nil
 			}
 		} else {
 			if c.ConnectionPooler != nil &&
@@ -946,6 +929,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
 				if err = c.deleteConnectionPooler(role); err != nil {
 					c.logger.Warningf("could not remove connection pooler: %v", err)
 				}
+			} else if c.ConnectionPooler.Deployment == nil && c.ConnectionPooler.Service == nil {
+				c.ConnectionPooler = nil
 			}
 		}
 	}
@@ -995,6 +980,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
 		if role == Master {
 			c.ConnectionPooler.Deployment = deployment
 		} else {
+			c.ConnectionPooler.ReplDeployment = deployment
 		}
 
 		// actual synchronization


@@ -225,9 +225,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPooler: &acidv1.ConnectionPooler{},
+					EnableReplicaConnectionPooler: boolToPointer(true),
 					EnableConnectionPooler: boolToPointer(false),
 				},
 			},
 			cluster: clusterMock,