Assorted changes

- Update deleteConnectionPooler to include role
- Rename EnableMasterConnectionPooler back to original name for backward
  compatibility
- Other minor changes and code improvements
This commit is contained in:
Rafia Sabih 2020-09-03 16:01:25 +02:00
parent 503082cf1a
commit 1814342dc3
17 changed files with 54 additions and 53 deletions

View File

@ -185,7 +185,7 @@ spec:
# Note: usernames specified here as database owners must be declared in the users key of the spec key.
dockerImage:
type: string
enableMasterConnectionPooler:
enableConnectionPooler:
type: boolean
enableReplicaConnectionPooler:
type: boolean

View File

@ -140,7 +140,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
is `false`, then no volume will be mounted no matter how operator was
configured (so you can override the operator configuration). Optional.
* **enableMasterConnectionPooler**
* **enableConnectionPooler**
Tells the operator to create a connection pooler with a database. If this
field is true, a connection pooler deployment will be created even if
`connectionPooler` section is empty. Optional, not set by default.
@ -387,7 +387,7 @@ CPU and memory limits for the sidecar container.
Parameters are grouped under the `connectionPooler` top-level key and specify
configuration for connection pooler. If this section is not empty, a connection
pooler will be created for a database even if `enableMasterConnectionPooler` is not
pooler will be created for a database even if `enableConnectionPooler` is not
present.
* **numberOfInstances**

View File

@ -736,7 +736,7 @@ manifest:
```yaml
spec:
enableMasterConnectionPooler: true
enableConnectionPooler: true
```
This will tell the operator to create a connection pooler with default
@ -772,7 +772,7 @@ spec:
memory: 100Mi
```
The `enableMasterConnectionPooler` flag is not required when the `connectionPooler`
The `enableConnectionPooler` flag is not required when the `connectionPooler`
section is present in the manifest. But, it can be used to disable/remove the
pooler while keeping its configuration.

View File

@ -99,7 +99,7 @@ class EndToEndTestCase(unittest.TestCase):
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableMasterConnectionPooler': True,
'enableConnectionPooler': True,
}
})
k8s.wait_for_pod_start(pod_selector)
@ -141,7 +141,7 @@ class EndToEndTestCase(unittest.TestCase):
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableMasterConnectionPooler': False,
'enableConnectionPooler': False,
}
})
k8s.wait_for_pods_to_stop(pod_selector)

View File

@ -18,7 +18,7 @@ spec:
- createdb
enableMasterLoadBalancer: false
enableReplicaLoadBalancer: false
enableMasterConnectionPooler: true # not needed when connectionPooler section is present (see below)
enableConnectionPooler: true # not needed when connectionPooler section is present (see below)
enableReplicaConnectionPooler: true # set to enable connectionPooler for replica endpoints
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32

View File

@ -181,7 +181,7 @@ spec:
# Note: usernames specified here as database owners must be declared in the users key of the spec key.
dockerImage:
type: string
enableMasterConnectionPooler:
enableConnectionPooler:
type: boolean
enableReplicaConnectionPooler:
type: boolean

View File

@ -259,7 +259,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
"dockerImage": {
Type: "string",
},
"enableMasterConnectionPooler": {
"enableConnectionPooler": {
Type: "boolean",
},
"enableReplicaConnectionPooler": {

View File

@ -29,7 +29,7 @@ type PostgresSpec struct {
Patroni `json:"patroni,omitempty"`
Resources `json:"resources,omitempty"`
EnableMasterConnectionPooler *bool `json:"enableMasterConnectionPooler,omitempty"`
EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"`
EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"`
ConnectionPooler *ConnectionPooler `json:"connectionPooler,omitempty"`

View File

@ -831,11 +831,14 @@ func (c *Cluster) Delete() {
// Delete connection pooler objects anyway, even if it's not mentioned in the
// manifest, just to not keep orphaned components in case if something went
// wrong
if err := c.deleteConnectionPooler(); err != nil {
for _, role := range [2]PostgresRole{Master, Replica} {
if err := c.deleteConnectionPooler(role); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}
//NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status).
func (c *Cluster) NeedsRepair() (bool, acidv1.PostgresStatus) {
c.specMu.RLock()

View File

@ -719,7 +719,7 @@ func TestInitSystemUsers(t *testing.T) {
}
// cluster with connection pooler
cl.Spec.EnableMasterConnectionPooler = boolToPointer(true)
cl.Spec.EnableConnectionPooler = boolToPointer(true)
cl.initSystemUsers()
if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
t.Errorf("%s, connection pooler user is not present", testName)

View File

@ -2273,12 +2273,9 @@ func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec,
return nil, err
}
var name string
name = c.connectionPoolerName(role)
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: c.connectionPoolerName(role),
Namespace: c.Namespace,
Labels: c.connectionPoolerLabelsSelector(role).MatchLabels,
Annotations: map[string]string{},
@ -2311,24 +2308,22 @@ func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec, rol
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
name := c.connectionPoolerName(role)
serviceSpec := v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Name: name,
Name: c.connectionPoolerName(role),
Port: pgPort,
TargetPort: intstr.IntOrString{StrVal: c.servicePort(role)},
},
},
Type: v1.ServiceTypeClusterIP,
Selector: map[string]string{"connection-pooler": c.connectionPoolerName(role)},
}
serviceSpec.Selector = map[string]string{"connection-pooler": name}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: c.connectionPoolerName(role),
Namespace: c.Namespace,
Labels: c.connectionPoolerLabelsSelector(role).MatchLabels,
Annotations: map[string]string{},

View File

@ -126,7 +126,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo
msg = "could not prepare database for connection pooler: %v"
return nil, fmt.Errorf(msg, err)
}
if c.Spec.EnableMasterConnectionPooler != nil || c.ConnectionPooler != nil {
if c.Spec.EnableConnectionPooler != nil || c.ConnectionPooler != nil {
deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec, Master)
if err != nil {
msg = "could not generate deployment for connection pooler: %v"
@ -197,7 +197,7 @@ func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoo
return c.ConnectionPooler, nil
}
func (c *Cluster) deleteConnectionPooler() (err error) {
func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
c.setProcessName("deleting connection pooler")
c.logger.Debugln("deleting connection pooler")
@ -210,7 +210,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) {
// Clean up the deployment object. If deployment resource we've remembered
// is somehow empty, try to delete based on what would we generate
deploymentName := c.connectionPoolerName(Master)
deploymentName := c.connectionPoolerName(role)
deployment := c.ConnectionPooler.Deployment
if deployment != nil {
@ -235,7 +235,7 @@ func (c *Cluster) deleteConnectionPooler() (err error) {
// Repeat the same for the service object
service := c.ConnectionPooler.Service
serviceName := c.connectionPoolerName(Master)
serviceName := c.connectionPoolerName(role)
if service != nil {
serviceName = service.Name

View File

@ -62,7 +62,7 @@ func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
t.Errorf("%s: Connection pooler service is empty", testName)
}
err = cluster.deleteConnectionPooler()
err = cluster.deleteConnectionPooler(Master)
if err != nil {
t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
}
@ -97,7 +97,7 @@ func TestNeedConnectionPooler(t *testing.T) {
}
cluster.Spec = acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
}
if !cluster.needConnectionPooler() {
@ -106,7 +106,7 @@ func TestNeedConnectionPooler(t *testing.T) {
}
cluster.Spec = acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(false),
EnableConnectionPooler: boolToPointer(false),
ConnectionPooler: &acidv1.ConnectionPooler{},
}
@ -116,7 +116,7 @@ func TestNeedConnectionPooler(t *testing.T) {
}
cluster.Spec = acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: &acidv1.ConnectionPooler{},
}

View File

@ -895,22 +895,25 @@ func (c *Cluster) syncConnectionPooler(oldSpec,
if oldNeedConnectionPooler && !newNeedConnectionPooler {
// delete and cleanup resources
if err = c.deleteConnectionPooler(); err != nil {
for _, role := range [2]PostgresRole{Master, Replica} {
if err = c.deleteConnectionPooler(role); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}
if !oldNeedConnectionPooler && !newNeedConnectionPooler {
// delete and cleanup resources if not empty
if c.ConnectionPooler != nil &&
(c.ConnectionPooler.Deployment != nil ||
c.ConnectionPooler.Service != nil) {
if err = c.deleteConnectionPooler(); err != nil {
for _, role := range [2]PostgresRole{Master, Replica} {
if err = c.deleteConnectionPooler(role); err != nil {
c.logger.Warningf("could not remove connection pooler: %v", err)
}
}
}
}
return reason, nil
}

View File

@ -139,7 +139,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
},
},
cluster: clusterMissingObjects,
@ -232,13 +232,13 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
subTest: "there is no sync from nil to an empty spec",
oldSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: nil,
},
},
newSpec: &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
EnableMasterConnectionPooler: boolToPointer(true),
EnableConnectionPooler: boolToPointer(true),
ConnectionPooler: &acidv1.ConnectionPooler{},
},
},

View File

@ -520,8 +520,8 @@ func (c *Cluster) patroniKubernetesUseConfigMaps() bool {
}
func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
if spec.EnableMasterConnectionPooler != nil {
return *spec.EnableMasterConnectionPooler
if spec.EnableConnectionPooler != nil {
return *spec.EnableConnectionPooler
} else if spec.EnableReplicaConnectionPooler != nil {
return *spec.EnableReplicaConnectionPooler
} else if spec.ConnectionPooler == nil {

View File

@ -607,16 +607,16 @@ def update_postgresql(namespace: str, cluster: str):
spec['volume'] = {'size': size}
if 'enableMasterConnectionPooler' in postgresql['spec']:
cp = postgresql['spec']['enableMasterConnectionPooler']
if 'enableConnectionPooler' in postgresql['spec']:
cp = postgresql['spec']['enableConnectionPooler']
if not cp:
if 'enableMasterConnectionPooler' in o['spec']:
del o['spec']['enableMasterConnectionPooler']
if 'enableConnectionPooler' in o['spec']:
del o['spec']['enableConnectionPooler']
else:
spec['enableMasterConnectionPooler'] = True
spec['enableConnectionPooler'] = True
else:
if 'enableMasterConnectionPooler' in o['spec']:
del o['spec']['enableMasterConnectionPooler']
if 'enableConnectionPooler' in o['spec']:
del o['spec']['enableConnectionPooler']
if 'enableReplicaConnectionPooler' in postgresql['spec']:
cp = postgresql['spec']['enableReplicaConnectionPooler']