diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py
index 85bcb6245..6c816524f 100644
--- a/e2e/tests/k8s_api.py
+++ b/e2e/tests/k8s_api.py
@@ -168,6 +168,9 @@ class K8s:
     def count_secrets_with_label(self, labels, namespace='default'):
         return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items)
 
+    def count_secrets_in_namespace(self, namespace='default'):
+        return len(self.api.core_v1.list_namespaced_secret(namespace).items)
+
     def count_statefulsets_with_label(self, labels, namespace='default'):
         return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items)
 
diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py
index 114f881c4..422fec991 100644
--- a/e2e/tests/test_e2e.py
+++ b/e2e/tests/test_e2e.py
@@ -203,7 +203,7 @@ class EndToEndTestCase(unittest.TestCase):
 
         self.k8s.update_config(enable_postgres_team_crd)
         self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-        
+
         self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresteams', 'custom-team-membership',
@@ -232,7 +232,7 @@ class EndToEndTestCase(unittest.TestCase):
             WHERE usename IN ('elephant', 'kind');
         """
         users = self.query_database(leader.metadata.name, "postgres", user_query)
-        self.eventuallyEqual(lambda: len(users), 2, 
+        self.eventuallyEqual(lambda: len(users), 2,
             "Not all additional users found in database: {}".format(users))
 
         # revert config change
@@ -414,7 +414,7 @@ class EndToEndTestCase(unittest.TestCase):
 
         db_list = self.list_databases(leader.metadata.name)
         for db in db_list:
-            self.eventuallyNotEqual(lambda: len(self.query_database(leader.metadata.name, db, schemas_query)), 0, 
+            self.eventuallyNotEqual(lambda: len(self.query_database(leader.metadata.name, db, schemas_query)), 0,
                 "Pooler schema not found in database {}".format(db))
 
         # remove config section to make test work next time
@@ -542,6 +542,25 @@ class EndToEndTestCase(unittest.TestCase):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_cross_namespace_secrets(self):
+        '''
+        Test that secrets for users of the form namespace.username are created in that namespace
+        '''
+        k8s = self.k8s
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresqls', 'acid-minimal-cluster',
+            {
+                'spec': {
+                    'users': {
+                        'appspace.db_user': [],
+                    }
+                }
+            })
+        self.eventuallyEqual(lambda: k8s.count_secrets_in_namespace('appspace'),
+                             1, "Secret not created in user namespace")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_lazy_spilo_upgrade(self):
         '''
@@ -744,7 +763,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources) 
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
 
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
         self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade")
diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go
index 1f6510e65..b4c497522 100644
--- a/pkg/cluster/cluster_test.go
+++ b/pkg/cluster/cluster_test.go
@@ -7,12 +7,14 @@ import (
 
 	"github.com/sirupsen/logrus"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/teams"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/record"
 )
 
@@ -845,3 +847,66 @@ func TestPreparedDatabases(t *testing.T) {
 		}
 	}
 }
+
+func TestCrossNamespacedSecrets(t *testing.T) {
+	testName := "test secrets in different namespace"
+	clientSet := fake.NewSimpleClientset()
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+	namespace := "default"
+
+	client := k8sutil.KubernetesClient{
+		StatefulSetsGetter: clientSet.AppsV1(),
+		ServicesGetter:     clientSet.CoreV1(),
+		DeploymentsGetter:  clientSet.AppsV1(),
+		PostgresqlsGetter:  acidClientSet.AcidV1(),
+		SecretsGetter:      clientSet.CoreV1(),
+	}
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "acid-fake-cluster",
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+			// The "appspace." prefix requests the user's secret in that namespace.
+			Users: map[string]acidv1.UserFlags{
+				"appspace.db_user": {},
+			},
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+					NumberOfInstances:                    int32ToPointer(1),
+				},
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+					PodRoleLabel:         "spilo-role",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	if err := cluster.initRobotUsers(); err != nil {
+		t.Errorf("%s: could not init robot users: %v", testName, err)
+	}
+
+	// initRobotUsers should strip the namespace prefix and assign the user
+	// to "appspace" rather than to the cluster's own namespace.
+	if ns := cluster.pgUsers["db_user"].Namespace; ns != "appspace" {
+		t.Errorf("%s: expected user namespace %q, got %q", testName, "appspace", ns)
+	}
+}