Feature/unit tests (#53)

- Avoid relying on Clientset structure to call Kubernetes API functions.
While Clientset is a convenient "catch-all" abstraction for calling
REST API related to different Kubernetes objects, it's impossible to
mock. Replacing it with the kubernetes.Interface would be quite
straightforward, but would require an extra level of mocked interfaces,
because of the versioning. Instead, a new interface is defined, which
contains only the objects we need of the pre-defined versions.

- Move KubernetesClient to the k8sutil package.
- Add more tests.
This commit is contained in:
Oleksii Kliukin 2017-07-24 16:56:46 +02:00 committed by GitHub
parent 4f36e447c3
commit 4455f1b639
8 changed files with 201 additions and 19 deletions

View File

@ -48,7 +48,7 @@ func ControllerConfig() *controller.Config {
log.Fatalf("Can't get REST config: %s", err)
}
client, err := k8sutil.KubernetesClient(restConfig)
client, err := k8sutil.ClientSet(restConfig)
if err != nil {
log.Fatalf("Can't create client: %s", err)
}
@ -60,7 +60,7 @@ func ControllerConfig() *controller.Config {
return &controller.Config{
RestConfig: restConfig,
KubeClient: client,
KubeClient: k8sutil.NewFromKubernetesInterface(client),
RestClient: restClient,
}
}
@ -101,7 +101,7 @@ func main() {
log.Printf("Config: %s", cfg.MustMarshal())
c := controller.New(controllerConfig, cfg)
c := controller.NewController(controllerConfig, cfg)
c.Run(stop, wg)
sig := <-sigs

View File

@ -11,7 +11,6 @@ import (
"sync"
"github.com/Sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/apps/v1beta1"
@ -36,7 +35,7 @@ var (
// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication.
type Config struct {
KubeClient *kubernetes.Clientset //TODO: move clients to the better place?
KubeClient k8sutil.KubernetesClient
RestClient *rest.RESTClient
RestConfig *rest.Config
TeamsAPIClient *teams.API

View File

@ -5,7 +5,6 @@ import (
"sync"
"github.com/Sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
@ -14,12 +13,13 @@ import (
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
"github.com/zalando-incubator/postgres-operator/pkg/util/teams"
)
type Config struct {
RestConfig *rest.Config
KubeClient *kubernetes.Clientset
KubeClient k8sutil.KubernetesClient
RestClient *rest.RESTClient
TeamsAPIClient *teams.API
InfrastructureRoles map[string]spec.PgUser
@ -27,6 +27,7 @@ type Config struct {
type Controller struct {
Config
opConfig *config.Config
logger *logrus.Entry
@ -43,7 +44,7 @@ type Controller struct {
lastClusterSyncTime int64
}
func New(controllerConfig *Config, operatorConfig *config.Config) *Controller {
func NewController(controllerConfig *Config, operatorConfig *config.Config) *Controller {
logger := logrus.New()
if operatorConfig.DebugLogging {
@ -82,7 +83,7 @@ func (c *Controller) initController() {
c.logger.Fatalf("could not register ThirdPartyResource: %v", err)
}
if infraRoles, err := c.getInfrastructureRoles(); err != nil {
if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil {
c.logger.Warningf("could not get infrastructure roles: %v", err)
} else {
c.InfrastructureRoles = infraRoles

View File

@ -29,7 +29,7 @@ func (c *Controller) podListFunc(options api.ListOptions) (runtime.Object, error
TimeoutSeconds: options.TimeoutSeconds,
}
return c.KubeClient.CoreV1().Pods(c.opConfig.Namespace).List(opts)
return c.KubeClient.Pods(c.opConfig.Namespace).List(opts)
}
func (c *Controller) podWatchFunc(options api.ListOptions) (watch.Interface, error) {
@ -52,7 +52,7 @@ func (c *Controller) podWatchFunc(options api.ListOptions) (watch.Interface, err
TimeoutSeconds: options.TimeoutSeconds,
}
return c.KubeClient.CoreV1Client.Pods(c.opConfig.Namespace).Watch(opts)
return c.KubeClient.Pods(c.opConfig.Namespace).Watch(opts)
}
func (c *Controller) podAdd(obj interface{}) {

View File

@ -51,7 +51,7 @@ func (c *Controller) createTPR() error {
TPRName := fmt.Sprintf("%s.%s", constants.TPRName, constants.TPRVendor)
tpr := thirdPartyResource(TPRName)
_, err := c.KubeClient.ExtensionsV1beta1().ThirdPartyResources().Create(tpr)
_, err := c.KubeClient.ThirdPartyResources().Create(tpr)
if err != nil {
if !k8sutil.ResourceAlreadyExists(err) {
return err
@ -64,17 +64,17 @@ func (c *Controller) createTPR() error {
return k8sutil.WaitTPRReady(c.RestClient, c.opConfig.TPR.ReadyWaitInterval, c.opConfig.TPR.ReadyWaitTimeout, c.opConfig.Namespace)
}
func (c *Controller) getInfrastructureRoles() (result map[string]spec.PgUser, err error) {
if c.opConfig.InfrastructureRolesSecretName == (spec.NamespacedName{}) {
func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (result map[string]spec.PgUser, err error) {
if *rolesSecret == (spec.NamespacedName{}) {
// we don't have infrastructure roles defined, bail out
return nil, nil
}
infraRolesSecret, err := c.KubeClient.
Secrets(c.opConfig.InfrastructureRolesSecretName.Namespace).
Get(c.opConfig.InfrastructureRolesSecretName.Name)
Secrets(rolesSecret.Namespace).
Get(rolesSecret.Name)
if err != nil {
c.logger.Debugf("Infrastructure roles secret name: %s", c.opConfig.InfrastructureRolesSecretName)
c.logger.Debugf("Infrastructure roles secret name: %s", *rolesSecret)
return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err)
}

154
pkg/controller/util_test.go Normal file
View File

@ -0,0 +1,154 @@
package controller
import (
"fmt"
"reflect"
"testing"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/api/v1"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
)
const (
	// testInfrastructureRolesSecretName is the only secret name the mock
	// secrets client below recognizes; any other name yields a "NotFound"
	// error from mockSecret.Get.
	testInfrastructureRolesSecretName = "infrastructureroles-test"
)
// mockSecret embeds SecretInterface so that only the method the tests need
// (Get) has to be implemented; calling any other method would dereference
// the nil embedded interface.
type mockSecret struct {
	v1core.SecretInterface
}
// Get returns a canned infrastructure-roles secret for the well-known test
// secret name, and a "NotFound" error for every other name.
func (m *mockSecret) Get(name string) (*v1.Secret, error) {
	if name == testInfrastructureRolesSecretName {
		result := new(v1.Secret)
		result.Name = mockController.opConfig.ClusterNameLabel
		result.Data = map[string][]byte{
			"user1":     []byte("testrole"),
			"password1": []byte("testpassword"),
			"inrole1":   []byte("testinrole"),
		}
		return result, nil
	}
	return nil, fmt.Errorf("NotFound")
}
// MockSecretGetter satisfies the SecretsGetter dependency of
// k8sutil.KubernetesClient, routing all secret operations to mockSecret.
type MockSecretGetter struct {
}

// Secrets returns the mock secrets client; the namespace argument is ignored.
func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface {
	return &mockSecret{}
}
// newMockKubernetesClient builds a KubernetesClient whose secrets API is
// backed by the mocks above; all other embedded getters are left nil, so the
// tests in this file must only exercise the secrets path.
func newMockKubernetesClient() k8sutil.KubernetesClient {
	return k8sutil.KubernetesClient{SecretsGetter: &MockSecretGetter{}}
}
// newMockController wires up a Controller with a mocked Kubernetes client and
// the minimal operator configuration required by the tests in this file.
func newMockController() *Controller {
	controller := NewController(&Config{}, &config.Config{})
	controller.opConfig.ClusterNameLabel = "cluster-name"
	// Keyed fields keep the literal vet-clean and robust against future
	// changes to spec.NamespacedName.
	controller.opConfig.InfrastructureRolesSecretName = spec.NamespacedName{
		Namespace: v1.NamespaceDefault,
		Name:      testInfrastructureRolesSecretName,
	}
	controller.opConfig.Workers = 4
	controller.KubeClient = newMockKubernetesClient()

	return controller
}
var mockController = newMockController()
// TestPodClusterName checks that the cluster name is derived from the pod's
// cluster-name label, and that a pod without labels yields the zero value.
func TestPodClusterName(t *testing.T) {
	var testTable = []struct {
		in       *v1.Pod
		expected spec.NamespacedName
	}{
		{
			// No metadata at all: no cluster name can be derived.
			&v1.Pod{},
			spec.NamespacedName{},
		},
		{
			&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					Namespace: v1.NamespaceDefault,
					Labels: map[string]string{
						mockController.opConfig.ClusterNameLabel: "testcluster",
					},
				},
			},
			// Keyed fields keep the cross-package literal vet-clean.
			spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: "testcluster"},
		},
	}
	for _, test := range testTable {
		resp := mockController.podClusterName(test.in)
		if resp != test.expected {
			t.Errorf("expected response %v does not match the actual %v", test.expected, resp)
		}
	}
}
// TestClusterWorkerID checks that cluster names map deterministically onto
// worker IDs (the mock controller is configured with 4 workers).
func TestClusterWorkerID(t *testing.T) {
	var testTable = []struct {
		in       spec.NamespacedName
		expected uint32
	}{
		{
			in:       spec.NamespacedName{Namespace: "foo", Name: "bar"},
			expected: 2,
		},
		{
			in:       spec.NamespacedName{Namespace: "default", Name: "testcluster"},
			expected: 3,
		},
	}
	for _, test := range testTable {
		resp := mockController.clusterWorkerID(test.in)
		if resp != test.expected {
			t.Errorf("expected response %v does not match the actual %v", test.expected, resp)
		}
	}
}
// TestGetInfrastructureRoles exercises getInfrastructureRoles for the three
// interesting cases: no secret configured, a missing secret, and a secret
// containing one complete role definition.
func TestGetInfrastructureRoles(t *testing.T) {
	var testTable = []struct {
		secretName    spec.NamespacedName
		expectedRoles map[string]spec.PgUser
		expectedError error
	}{
		{
			// Empty secret name: no infrastructure roles are defined.
			spec.NamespacedName{},
			nil,
			nil,
		},
		{
			// A name the mock secrets client does not recognize.
			spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: "null"},
			nil,
			fmt.Errorf(`could not get infrastructure roles secret: NotFound`),
		},
		{
			spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName},
			map[string]spec.PgUser{
				// NOTE(review): PgUser literal left unkeyed; key the fields
				// once their names are confirmed against pkg/spec.
				"testrole": {
					"testrole",
					"testpassword",
					nil,
					[]string{"testinrole"},
				},
			},
			nil,
		},
	}
	for _, test := range testTable {
		roles, err := mockController.getInfrastructureRoles(&test.secretName)
		if err != test.expectedError {
			// Error values are distinct instances; fall back to comparing
			// their messages before reporting a mismatch.
			if err != nil && test.expectedError != nil && err.Error() == test.expectedError.Error() {
				continue
			}
			t.Errorf("expected error '%v' does not match the actual error '%v'", test.expectedError, err)
		}
		if !reflect.DeepEqual(roles, test.expectedRoles) {
			t.Errorf("expected roles output %v does not match the actual %v", test.expectedRoles, roles)
		}
	}
}

View File

@ -3,7 +3,6 @@ package spec
import (
"fmt"
"strings"
"database/sql"
"k8s.io/client-go/pkg/api/v1"

View File

@ -5,6 +5,9 @@ import (
"time"
"k8s.io/client-go/kubernetes"
v1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
extensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
"k8s.io/client-go/pkg/api"
apierrors "k8s.io/client-go/pkg/api/errors"
"k8s.io/client-go/pkg/api/unversioned"
@ -18,6 +21,32 @@ import (
"github.com/zalando-incubator/postgres-operator/pkg/util/retryutil"
)
// KubernetesClient groups only the typed Kubernetes API getters the operator
// actually uses. Unlike *kubernetes.Clientset (a concrete struct), each
// embedded getter is an interface and can therefore be replaced with a mock
// in tests.
type KubernetesClient struct {
	v1core.SecretsGetter
	v1core.ServicesGetter
	v1core.EndpointsGetter
	v1core.PodsGetter
	v1core.PersistentVolumesGetter
	v1core.PersistentVolumeClaimsGetter
	v1core.ConfigMapsGetter
	v1beta1.StatefulSetsGetter
	extensions.ThirdPartyResourcesGetter
}
// NewFromKubernetesInterface populates a KubernetesClient from a full
// kubernetes.Interface, wiring every getter to the corresponding versioned
// client of the source interface.
func NewFromKubernetesInterface(src kubernetes.Interface) (c KubernetesClient) {
	core := src.CoreV1()
	return KubernetesClient{
		SecretsGetter:                core,
		ServicesGetter:               core,
		EndpointsGetter:              core,
		PodsGetter:                   core,
		PersistentVolumesGetter:      core,
		PersistentVolumeClaimsGetter: core,
		ConfigMapsGetter:             core,
		StatefulSetsGetter:           src.AppsV1beta1(),
		ThirdPartyResourcesGetter:    src.ExtensionsV1beta1(),
	}
}
func RestConfig(kubeConfig string, outOfCluster bool) (*rest.Config, error) {
if outOfCluster {
return clientcmd.BuildConfigFromFlags("", kubeConfig)
@ -25,7 +54,7 @@ func RestConfig(kubeConfig string, outOfCluster bool) (*rest.Config, error) {
return rest.InClusterConfig()
}
func KubernetesClient(config *rest.Config) (client *kubernetes.Clientset, err error) {
// ClientSet builds a full Kubernetes clientset from the given REST config.
// The concrete *kubernetes.Clientset is then wrapped via
// NewFromKubernetesInterface before being handed to the controller.
func ClientSet(config *rest.Config) (client *kubernetes.Clientset, err error) {
	return kubernetes.NewForConfig(config)
}