diff --git a/e2e/README.md b/e2e/README.md
index 3bba6ccc3..5aa987593 100644
--- a/e2e/README.md
+++ b/e2e/README.md
@@ -44,7 +44,7 @@ To run the end 2 end test and keep the kind state execute:
 NOCLEANUP=True ./run.sh main
 ```
 
-## Run indidual test
+## Run individual test
 
 After having executed a normal E2E run with `NOCLEANUP=True` Kind still continues to run, allowing you subsequent test runs.
 
diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py
index 9e2035652..ecc0b2327 100644
--- a/e2e/tests/test_e2e.py
+++ b/e2e/tests/test_e2e.py
@@ -160,7 +160,7 @@ class EndToEndTestCase(unittest.TestCase):
         self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
         self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
         self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
-                             "Initial broken deplyment not rolled out")
+                             "Initial broken deployment not rolled out")
 
         self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -221,6 +221,8 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.count_services_with_label(
                              'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
                              2, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Pooler secret not created")
 
         # Turn off only master connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -246,6 +248,8 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.count_services_with_label(
                              'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
                              1, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Secret not created")
 
         # Turn off only replica connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -268,6 +272,8 @@ class EndToEndTestCase(unittest.TestCase):
                              0, "Pooler replica pods not deleted")
         self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
                              1, "No pooler service found")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
+                             1, "Secret not created")
 
         # scale up connection pooler deployment
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -301,6 +307,8 @@ class EndToEndTestCase(unittest.TestCase):
                              0, "Pooler pods not scaled down")
         self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
                              0, "Pooler service not removed")
+        self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
+                             4, "Secrets not deleted")
 
         # Verify that all the databases have pooler schema installed.
         # Do this via psql, since otherwise we need to deal with
@@ -1034,7 +1042,7 @@ class EndToEndTestCase(unittest.TestCase):
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
-        
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zzzz_cluster_deletion(self):
         '''
diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go
index 1d1d609e4..e6cc60cb2 100644
--- a/pkg/cluster/connection_pooler.go
+++ b/pkg/cluster/connection_pooler.go
@@ -539,13 +539,13 @@ func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, depl
 // Test if two connection pooler configuration needs to be synced. For simplicity
 // compare not the actual K8S objects, but the configuration itself and request
 // sync if there is any difference.
-func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
+func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, logger *logrus.Entry) (sync bool, reasons []string) {
 	reasons = []string{}
 	sync = false
 
 	changelog, err := diff.Diff(oldSpec, newSpec)
 	if err != nil {
-		//c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
+		logger.Infof("cannot get diff, do not do anything, %+v", err)
 		return false, reasons
 	}
 
@@ -681,13 +681,45 @@ func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql)
 }
 
 func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {
-	logPoolerEssentials(c.logger, oldSpec, newSpec)
 	var reason SyncReason
 	var err error
 	var newNeedConnectionPooler, oldNeedConnectionPooler bool
 	oldNeedConnectionPooler = false
 
+	if oldSpec == nil {
+		oldSpec = &acidv1.Postgresql{
+			Spec: acidv1.PostgresSpec{
+				ConnectionPooler: &acidv1.ConnectionPooler{},
+			},
+		}
+	}
+
+	needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger)
+	masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
+	if err != nil {
+		c.logger.Error("Error in getting diff of master connection pooler changes")
+	}
+	replicaChanges, err := diff.Diff(oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler)
+	if err != nil {
+		c.logger.Error("Error in getting diff of replica connection pooler changes")
+	}
+
+	// skip pooler sync only
+	// 1. if there is no diff in spec, AND
+	// 2. if connection pooler is already there and is also required as per newSpec
+	//
+	// Handling the case when connectionPooler is not there but it is required
+	// as per spec, hence do not skip syncing in that case, even though there
+	// is no diff in specs
+	if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
+		(c.ConnectionPooler != nil && *newSpec.Spec.EnableConnectionPooler) {
+		c.logger.Debugln("syncing pooler is not required")
+		return nil, nil
+	}
+
+	logPoolerEssentials(c.logger, oldSpec, newSpec)
+
 	// Check and perform the sync requirements for each of the roles.
 	for _, role := range [2]PostgresRole{Master, Replica} {
@@ -841,7 +873,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
 	var specReason []string
 
 	if oldSpec != nil {
-		specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
+		specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger)
 	}
 
 	defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment)
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 6b1af045f..06b074b4c 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -1561,11 +1561,17 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
 	}
 
 	username := pgUser.Name
+	lbls := c.labelsSet(true)
+
+	if username == constants.ConnectionPoolerUserName {
+		lbls = c.connectionPoolerLabels("", false).MatchLabels
+	}
+
 	secret := v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:        c.credentialSecretName(username),
 			Namespace:   namespace,
-			Labels:      c.labelsSet(true),
+			Labels:      lbls,
 			Annotations: c.annotationsSet(nil),
 		},
 		Type: v1.SecretTypeOpaque,
@@ -1574,6 +1580,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
 			"password": []byte(pgUser.Password),
 		},
 	}
+
 	return &secret
 }
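Note: the new assertions in test_e2e.py call a count_secrets_with_label helper that is not shown in this diff. A minimal sketch of its expected shape, assuming it lives on the e2e K8s wrapper class and mirrors the existing count_services_with_label helper, with an api.core_v1 CoreV1Api client on the wrapper (the list_namespaced_secret call itself is standard kubernetes-client API):

    # Hypothetical helper on the e2e K8s wrapper class (not part of this diff);
    # counts the secrets in a namespace that match the given label selector.
    def count_secrets_with_label(self, labels, namespace='default'):
        return len(self.api.core_v1.list_namespaced_secret(
            namespace, label_selector=labels).items)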
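For context on the k8sres.go change: the connection pooler user's secret is now labeled with the pooler label set (application=db-connection-pooler) instead of the cluster-wide spilo labels, which is what lets the tests above count pooler and spilo secrets separately. A rough spot check against a local kind cluster, using the standard kubernetes Python client; label values assume the acid-minimal-cluster example manifests:

    from kubernetes import client, config

    # List only the pooler-related secrets; before this change the pooler
    # user's secret carried application=spilo and would not match this selector.
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    pooler_secrets = core_v1.list_namespaced_secret(
        'default',
        label_selector='application=db-connection-pooler,cluster-name=acid-minimal-cluster')
    print([s.metadata.name for s in pooler_secrets.items])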