From b40ea2c4263fbca4c0521b87e5df11fdd1aec512 Mon Sep 17 00:00:00 2001 From: Dmitrii Dolgov <9erthalion6@gmail.com> Date: Wed, 22 Jan 2020 14:49:54 +0100 Subject: [PATCH] Add more tests --- pkg/cluster/cluster.go | 2 +- pkg/cluster/k8sres.go | 5 + pkg/cluster/k8sres_test.go | 327 +++++++++++++++++++++++++++++++++- pkg/cluster/resources.go | 4 +- pkg/cluster/resources_test.go | 65 +++++++ pkg/cluster/types.go | 2 + pkg/util/k8sutil/k8sutil.go | 57 +++++- 7 files changed, 452 insertions(+), 10 deletions(-) create mode 100644 pkg/cluster/resources_test.go diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1681e7d2e..afcb0df82 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -336,7 +336,7 @@ func (c *Cluster) Create() error { c.logger.Warning("Connection pool already exists in the cluster") return nil } - connPool, err := c.createConnectionPool() + connPool, err := c.createConnectionPool(c.installLookupFunction) if err != nil { c.logger.Warningf("could not create connection pool: %v", err) return nil diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index f992c2244..8b7860886 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1842,6 +1842,11 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) ( func (c *Cluster) ownerReferences() []metav1.OwnerReference { controller := true + if c.Statefulset == nil { + c.logger.Warning("Cannot get owner reference, no statefulset") + return []metav1.OwnerReference{} + } + return []metav1.OwnerReference{ { UID: c.Statefulset.ObjectMeta.UID, diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index aa9ef6513..012df4072 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -2,6 +2,7 @@ package cluster import ( "errors" + "fmt" "reflect" v1 "k8s.io/api/core/v1" @@ -14,6 +15,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + appsv1 "k8s.io/api/apps/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -453,7 +455,80 @@ func TestSecretVolume(t *testing.T) { } } -func TestConnPoolPodTemplate(t *testing.T) { +func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { + cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"] + if cpuReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest { + return fmt.Errorf("CPU request doesn't match, got %s, expected %s", + cpuReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest) + } + + memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"] + if memReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest { + return fmt.Errorf("Memory request doesn't match, got %s, expected %s", + memReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest) + } + + cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"] + if cpuLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit { + return fmt.Errorf("CPU limit doesn't match, got %s, expected %s", + cpuLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit) + } + + memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"] + if memLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit { + return fmt.Errorf("Memory limit doesn't match, got %s, expected %s", + memLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit) + } + + return nil 
+}
+
+func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+	poolLabels := podSpec.ObjectMeta.Labels["connection-pool"]
+
+	if poolLabels != cluster.connPoolLabelsSelector().MatchLabels["connection-pool"] {
+		return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
+			podSpec.ObjectMeta.Labels, cluster.connPoolLabelsSelector().MatchLabels)
+	}
+
+	return nil
+}
+
+func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+	required := map[string]bool{
+		"PGHOST":               false,
+		"PGPORT":               false,
+		"PGUSER":               false,
+		"PGSCHEMA":             false,
+		"PGPASSWORD":           false,
+		"CONNECTION_POOL_MODE": false,
+		"CONNECTION_POOL_PORT": false,
+	}
+
+	envs := podSpec.Spec.Containers[0].Env
+	for _, env := range envs {
+		required[env.Name] = true
+	}
+
+	for env, value := range required {
+		if !value {
+			return fmt.Errorf("Environment variable %s is not present", env)
+		}
+	}
+
+	return nil
+}
+
+func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
+	if podSpec.ObjectMeta.Name != "test-pod-template" {
+		return fmt.Errorf("Custom pod template is not used, current spec %+v",
+			podSpec)
+	}
+
+	return nil
+}
+
+func TestConnPoolPodSpec(t *testing.T) {
 	testName := "Test connection pool pod template generation"
 	var cluster = New(
 		Config{
@@ -484,19 +559,23 @@ func TestConnPoolPodTemplate(t *testing.T) {
 		},
 	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
 
+	noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error { return nil }
+
 	tests := []struct {
 		subTest  string
 		spec     *acidv1.PostgresSpec
 		expected error
 		cluster  *Cluster
+		check    func(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
 	}{
 		{
-			subTest: "empty pod template",
+			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
 				ConnectionPool: &acidv1.ConnectionPool{},
 			},
 			expected: nil,
 			cluster:  cluster,
+			check:    noCheck,
 		},
 		{
 			subTest: "no default resources",
@@ -505,14 +584,256 @@ func TestConnPoolPodTemplate(t *testing.T) {
 			},
 			expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`),
 			cluster:  clusterNoDefaultRes,
+			check:    noCheck,
+		},
+		{
+			subTest: "default resources are set",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testResources,
+		},
+		{
+			subTest: "labels for service",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testLabels,
+		},
+		{
+			subTest: "required envs",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testEnvs,
+		},
+		{
+			subTest: "custom pod template",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{
+					PodTemplate: &v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "test-pod-template",
+						},
+					},
+				},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testCustomPodTemplate,
+		},
 	}
 	for _, tt := range tests {
-		_, err := tt.cluster.generateConnPoolPodTemplate(tt.spec)
+		podSpec, err := tt.cluster.generateConnPoolPodTemplate(tt.spec)
 
-		if err != tt.expected && err.Error() != tt.expected.Error() {
+		if err != tt.expected && (err == nil || tt.expected == nil || err.Error() != tt.expected.Error()) {
 			t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
 				testName, tt.subTest, err, tt.expected)
 		}
+
+		err = tt.check(tt.cluster, podSpec)
+		if err != nil {
+			t.Errorf("%s [%s]: Pod spec is incorrect, %+v",
+				testName,
+				tt.subTest, err)
+		}
+	}
+}
+
+func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
+	owner := deployment.ObjectMeta.OwnerReferences[0]
+
+	if owner.Name != cluster.Statefulset.ObjectMeta.Name {
+		return fmt.Errorf("Owner reference is incorrect, got %s, expected %s",
+			owner.Name, cluster.Statefulset.ObjectMeta.Name)
+	}
+
+	return nil
+}
+
+func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
+	labels := deployment.Spec.Selector.MatchLabels
+	expected := cluster.connPoolLabelsSelector().MatchLabels
+
+	if labels["connection-pool"] != expected["connection-pool"] {
+		return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",
+			labels, expected)
+	}
+
+	return nil
+}
+
+func TestConnPoolDeploymentSpec(t *testing.T) {
+	testName := "Test connection pool deployment spec generation"
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				ConnectionPool: config.ConnectionPool{
+					ConnPoolDefaultCPURequest:    "100m",
+					ConnPoolDefaultCPULimit:      "100m",
+					ConnPoolDefaultMemoryRequest: "100M",
+					ConnPoolDefaultMemoryLimit:   "100M",
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
+	cluster.Statefulset = &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-sts",
+		},
+	}
+
+	noCheck := func(cluster *Cluster, deployment *appsv1.Deployment) error {
+		return nil
+	}
+
+	tests := []struct {
+		subTest  string
+		spec     *acidv1.PostgresSpec
+		expected error
+		cluster  *Cluster
+		check    func(cluster *Cluster, deployment *appsv1.Deployment) error
+	}{
+		{
+			subTest: "default configuration",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    noCheck,
+		},
+		{
+			subTest: "owner reference",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testDeploymentOwnerReference,
+		},
+		{
+			subTest: "selector",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			expected: nil,
+			cluster:  cluster,
+			check:    testSelector,
+		},
+	}
+	for _, tt := range tests {
+		deployment, err := tt.cluster.generateConnPoolDeployment(tt.spec)
+
+		if err != tt.expected && (err == nil || tt.expected == nil || err.Error() != tt.expected.Error()) {
+			t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v",
+				testName, tt.subTest, err, tt.expected)
+		}
+
+		err = tt.check(tt.cluster, deployment)
+		if err != nil {
+			t.Errorf("%s [%s]: Deployment spec is incorrect, %+v",
+				testName, tt.subTest, err)
+		}
+	}
+}
+
+func testServiceOwnerReference(cluster *Cluster, service *v1.Service) error {
+	owner := service.ObjectMeta.OwnerReferences[0]
+
+	if owner.Name != cluster.Statefulset.ObjectMeta.Name {
+		return fmt.Errorf("Owner reference is incorrect, got %s, expected %s",
+			owner.Name, cluster.Statefulset.ObjectMeta.Name)
+	}
+
+	return nil
+}
+
+func testServiceSelector(cluster *Cluster, service *v1.Service) error {
+	selector := service.Spec.Selector
+
+	if selector["connection-pool"] != cluster.connPoolName() {
+		return fmt.Errorf("Selector is incorrect, got %s, expected %s",
+			selector["connection-pool"], cluster.connPoolName())
+	}
+
+	return nil
+}
+
+func TestConnPoolServiceSpec(t *testing.T) {
+	testName := "Test connection pool service spec generation"
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				ConnectionPool: config.ConnectionPool{
+					ConnPoolDefaultCPURequest:    "100m",
+					ConnPoolDefaultCPULimit:      "100m",
+					ConnPoolDefaultMemoryRequest: "100M",
+					ConnPoolDefaultMemoryLimit:   "100M",
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
+	cluster.Statefulset = &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-sts",
+		},
+	}
+
+	noCheck := func(cluster *Cluster, service *v1.Service) error {
+		return nil
+	}
+
+	tests := []struct {
+		subTest string
+		spec    *acidv1.PostgresSpec
+		cluster *Cluster
+		check   func(cluster *Cluster, service *v1.Service) error
+	}{
+		{
+			subTest: "default configuration",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			cluster: cluster,
+			check:   noCheck,
+		},
+		{
+			subTest: "owner reference",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			cluster: cluster,
+			check:   testServiceOwnerReference,
+		},
+		{
+			subTest: "selector",
+			spec: &acidv1.PostgresSpec{
+				ConnectionPool: &acidv1.ConnectionPool{},
+			},
+			cluster: cluster,
+			check:   testServiceSelector,
+		},
+	}
+	for _, tt := range tests {
+		service := tt.cluster.generateConnPoolService(tt.spec)
+
+		if err := tt.check(tt.cluster, service); err != nil {
+			t.Errorf("%s [%s]: Service spec is incorrect, %+v",
+				testName, tt.subTest, err)
+		}
+	}
+}
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index b4c7e578f..4f9d72e19 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -97,11 +97,11 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
 //
 // After that create all the objects for connection pool, namely a deployment
 // with a chosen pooler and a service to expose it.
-func (c *Cluster) createConnectionPool() (*ConnectionPoolResources, error) {
+func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolResources, error) {
 	var msg string
 	c.setProcessName("creating connection pool")
 
-	err := c.installLookupFunction(
+	err := lookup(
 		c.OpConfig.ConnectionPool.Schema,
 		c.OpConfig.ConnectionPool.User)
diff --git a/pkg/cluster/resources_test.go b/pkg/cluster/resources_test.go
new file mode 100644
index 000000000..a3754c564
--- /dev/null
+++ b/pkg/cluster/resources_test.go
@@ -0,0 +1,65 @@
+package cluster
+
+import (
+	"testing"
+
+	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func mockInstallLookupFunction(schema string, user string) error {
+	return nil
+}
+
+func TestConnPoolCreationAndDeletion(t *testing.T) {
+	testName := "Test connection pool creation and deletion"
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				ConnectionPool: config.ConnectionPool{
+					ConnPoolDefaultCPURequest:    "100m",
+					ConnPoolDefaultCPULimit:      "100m",
+					ConnPoolDefaultMemoryRequest: "100M",
+					ConnPoolDefaultMemoryLimit:   "100M",
+				},
+			},
+		}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)
+
+	cluster.Statefulset = &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-sts",
+		},
+	}
+
+	cluster.Spec = acidv1.PostgresSpec{
+		ConnectionPool: &acidv1.ConnectionPool{},
+	}
+	poolResources, err := cluster.createConnectionPool(mockInstallLookupFunction)
+
+	if err != nil {
+		t.Fatalf("%s: Cannot create connection pool, %s",
+			testName, err)
+	}
+
+	if poolResources.Deployment == nil {
+		t.Errorf("%s: Connection pool deployment is empty", testName)
+	}
+
+	if poolResources.Service == nil {
+		t.Errorf("%s: Connection pool service is empty", testName)
+	}
+
+	err = cluster.deleteConnectionPool()
+	if err != nil {
+		t.Errorf("%s: Cannot delete connection pool, %s", testName, err)
+	}
+}
diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go
index 286505621..04d00cb58 100644
--- a/pkg/cluster/types.go
+++ b/pkg/cluster/types.go
@@ -71,3 +71,5 @@ type ClusterStatus struct {
 }
 
 type TemplateParams map[string]interface{}
+
+type InstallFunction func(schema string, user string) error
diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go
index 672d94634..77e46476b 100644
--- a/pkg/util/k8sutil/k8sutil.go
+++ b/pkg/util/k8sutil/k8sutil.go
@@ -10,6 +10,7 @@ import (
 	clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
 
 	"github.com/zalando/postgres-operator/pkg/util/constants"
+	apiappsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
 	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@@ -57,6 +58,20 @@ type mockSecret struct {
 type MockSecretGetter struct {
 }
 
+type mockDeployment struct {
+	appsv1.DeploymentInterface
+}
+
+type MockDeploymentGetter struct {
+}
+
+type mockService struct {
+	corev1.ServiceInterface
+}
+
+type MockServiceGetter struct {
+}
+
 type mockConfigMap struct {
 	corev1.ConfigMapInterface
 }
@@ -217,19 +232,53 @@ func (c *mockConfigMap) Get(name string, options metav1.GetOptions) (*v1.ConfigM
 }
 
 // Secrets to be mocked
-func (c *MockSecretGetter)
Secrets(namespace string) corev1.SecretInterface { +func (mock *MockSecretGetter) Secrets(namespace string) corev1.SecretInterface { return &mockSecret{} } // ConfigMaps to be mocked -func (c *MockConfigMapsGetter) ConfigMaps(namespace string) corev1.ConfigMapInterface { +func (mock *MockConfigMapsGetter) ConfigMaps(namespace string) corev1.ConfigMapInterface { return &mockConfigMap{} } +func (mock *MockDeploymentGetter) Deployments(namespace string) appsv1.DeploymentInterface { + return &mockDeployment{} +} + +func (mock *mockDeployment) Create(*apiappsv1.Deployment) (*apiappsv1.Deployment, error) { + return &apiappsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + }, + }, nil +} + +func (mock *mockDeployment) Delete(name string, opts *metav1.DeleteOptions) error { + return nil +} + +func (mock *MockServiceGetter) Services(namespace string) corev1.ServiceInterface { + return &mockService{} +} + +func (mock *mockService) Create(*v1.Service) (*v1.Service, error) { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + }, + }, nil +} + +func (mock *mockService) Delete(name string, opts *metav1.DeleteOptions) error { + return nil +} + // NewMockKubernetesClient for other tests func NewMockKubernetesClient() KubernetesClient { return KubernetesClient{ - SecretsGetter: &MockSecretGetter{}, - ConfigMapsGetter: &MockConfigMapsGetter{}, + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + DeploymentsGetter: &MockDeploymentGetter{}, + ServicesGetter: &MockServiceGetter{}, } }
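
For reference, a minimal sketch of how the extended mock client behaves end to end. Only the identifiers introduced by this patch are assumed; the package name, namespace string, and printed values are illustrative, and the client-go signatures match the pre-context (v1.16-era) typed interfaces this repo uses:

package main

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/util/k8sutil"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	// NewMockKubernetesClient now wires in deployment and service mocks,
	// so create/delete calls succeed without a running API server.
	client := k8sutil.NewMockKubernetesClient()

	// mockDeployment.Create ignores its argument and returns the canned
	// "test-deployment" object.
	deployment, err := client.Deployments("any-namespace").Create(&appsv1.Deployment{})
	fmt.Println(deployment.Name, err) // test-deployment <nil>

	// mockService.Create behaves the same way for services.
	service, _ := client.Services("any-namespace").Create(&v1.Service{})
	fmt.Println(service.Name) // test-service

	// Delete on either mock is a no-op that reports success, which is
	// what lets TestConnPoolCreationAndDeletion exercise both paths.
	_ = client.Deployments("any-namespace").Delete("test-deployment", nil)
}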