Merge branch 'master' into feature/tests

# Conflicts:
#	pkg/util/teams/teams.go
Murat Kabilov 2017-06-09 11:35:20 +02:00
commit a198442338
11 changed files with 43 additions and 23 deletions

View File

@@ -25,7 +25,6 @@ data:
 resource_check_interval: 3s
 resource_check_timeout: 10m
 resync_period: 5m
-resync_period_pod: 5m
 super_username: postgres
 teams_api_url: http://fake-teams-api.default.svc.cluster.local
 workers: "4"

View File

@@ -430,7 +430,7 @@ func (c *Cluster) Update(newSpec *spec.Postgresql) error {
 c.logger.Infof("%s service '%s' has been created", role, util.NameFromMeta(service.ObjectMeta))
 }
 }
-// only proceeed further if both old and new load balancer were present
+// only proceed further if both old and new load balancer were present
 if !(newSpec.Spec.ReplicaLoadBalancer && c.Spec.ReplicaLoadBalancer) {
 continue
 }

View File

@@ -39,7 +39,7 @@ func (c *Cluster) Sync() error {
 if c.Service[role] != nil {
 // delete the left over replica service
 if err := c.deleteService(role); err != nil {
-return fmt.Errorf("could not delete obsolete %s service: %v", role)
+return fmt.Errorf("could not delete obsolete %s service: %v", role, err)
 }
 }
 continue
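
The corrected line passes `err` for the second `%v` verb. Without it, `fmt` renders the placeholder as `%!v(MISSING)` and the underlying error never makes it into the message; `go vet`'s printf check also flags this kind of mismatch. A minimal, self-contained sketch of the failure mode (the variable names here are illustrative, not taken from the operator):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	role := "replica"
	cause := errors.New("service not found") // stand-in for the real deletion error

	// Missing argument: two verbs, one value -- the message ends in "%!v(MISSING)"
	// and the actual error is silently dropped.
	broken := fmt.Errorf("could not delete obsolete %s service: %v", role)

	// Corrected call, mirroring the change in the diff: both verbs are satisfied.
	fixed := fmt.Errorf("could not delete obsolete %s service: %v", role, cause)

	fmt.Println(broken)
	fmt.Println(fixed)
}
```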

View File

@@ -109,8 +109,8 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
 return fmt.Errorf("could not connect to the volume provider: %v", err)
 }
 defer func() {
-err2 := resizer.DisconnectFromProvider(); if err2 != nil {
-c.logger.Errorf("%v", err2)
+if err := resizer.DisconnectFromProvider(); err != nil {
+c.logger.Errorf("%v", err)
 }
 }()
 }
@@ -127,7 +127,7 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
 if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil {
 return fmt.Errorf("could not resize the filesystem on pod '%s': %v", podName, err)
 }
-c.logger.Debugf("filesystem resize successfull on volume %s", pv.Name)
+c.logger.Debugf("filesystem resize successful on volume %s", pv.Name)
 pv.Spec.Capacity[v1.ResourceStorage] = newQuantity
 c.logger.Debugf("updating persistent volume definition for volume %s", pv.Name)
 if _, err := c.KubeClient.PersistentVolumes().Update(pv); err != nil {
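
The first hunk replaces the cramped `err2 := ...; if err2 != nil {` one-liner with the idiomatic `if err := ...; err != nil` form inside the deferred cleanup, keeping the error scoped to the check. A rough sketch of that pattern with a stand-in type; `provider`, `ConnectToProvider` and the error text below are assumptions for illustration, only `DisconnectFromProvider` appears in the diff:

```go
package main

import (
	"errors"
	"log"
)

// provider stands in for the real volume resizer; only the connect/disconnect
// calls relevant to the deferred-cleanup pattern are modelled.
type provider struct{}

func (p *provider) ConnectToProvider() error      { return nil }
func (p *provider) DisconnectFromProvider() error { return errors.New("disconnect failed") }

func resizeVolumes(p *provider) error {
	if err := p.ConnectToProvider(); err != nil {
		return err
	}
	// Deferred cleanup: the error is declared inside the if-statement, so it
	// cannot shadow or be confused with errors used elsewhere in the function.
	defer func() {
		if err := p.DisconnectFromProvider(); err != nil {
			log.Printf("%v", err)
		}
	}()

	// ... the actual volume and filesystem resizing would happen here ...
	return nil
}

func main() {
	if err := resizeVolumes(&provider{}); err != nil {
		log.Fatal(err)
	}
}
```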

View File

@@ -13,6 +13,7 @@ import (
 "github.com/zalando-incubator/postgres-operator/pkg/cluster"
 "github.com/zalando-incubator/postgres-operator/pkg/spec"
 "github.com/zalando-incubator/postgres-operator/pkg/util/config"
+"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
 "github.com/zalando-incubator/postgres-operator/pkg/util/teams"
 )
@@ -38,6 +39,8 @@ type Controller struct {
 podCh chan spec.PodEvent
 clusterEventQueues []*cache.FIFO
+lastClusterSyncTime int64
 }
 func New(controllerConfig *Config, operatorConfig *config.Config) *Controller {
@@ -93,7 +96,7 @@ func (c *Controller) initController() {
 c.postgresqlInformer = cache.NewSharedIndexInformer(
 clusterLw,
 &spec.Postgresql{},
-c.opConfig.ResyncPeriod,
+constants.QueueResyncPeriodTPR,
 cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 if err := c.postgresqlInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -113,7 +116,7 @@ func (c *Controller) initController() {
 c.podInformer = cache.NewSharedIndexInformer(
 podLw,
 &v1.Pod{},
-c.opConfig.ResyncPeriodPod,
+constants.QueueResyncPeriodPod,
 cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 if err := c.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -141,6 +144,7 @@ func (c *Controller) runInformers(stopCh <-chan struct{}) {
 go c.postgresqlInformer.Run(stopCh)
 go c.podInformer.Run(stopCh)
 go c.podEventsDispatcher(stopCh)
+go c.clusterResync(stopCh)
 <-stopCh
 }

View File

@@ -3,6 +3,8 @@ package controller
 import (
 "fmt"
 "reflect"
+"sync/atomic"
+"time"
 "k8s.io/client-go/pkg/api"
 "k8s.io/client-go/pkg/api/meta"
@@ -18,6 +20,19 @@ import (
 "github.com/zalando-incubator/postgres-operator/pkg/util/constants"
 )
+func (c *Controller) clusterResync(stopCh <-chan struct{}) {
+ticker := time.NewTicker(c.opConfig.ResyncPeriod)
+for {
+select {
+case <-ticker.C:
+c.clusterListFunc(api.ListOptions{ResourceVersion: "0"})
+case <-stopCh:
+return
+}
+}
+}
 func (c *Controller) clusterListFunc(options api.ListOptions) (runtime.Object, error) {
 c.logger.Info("Getting list of currently running clusters")
@@ -37,6 +52,11 @@ func (c *Controller) clusterListFunc(options api.ListOptions) (runtime.Object, error) {
 return nil, fmt.Errorf("could not extract list of postgresql objects: %v", err)
 }
+if time.Now().Unix()-atomic.LoadInt64(&c.lastClusterSyncTime) <= int64(c.opConfig.ResyncPeriod.Seconds()) {
+c.logger.Debugln("skipping resync of clusters")
+return object, err
+}
 var activeClustersCnt, failedClustersCnt int
 for _, obj := range objList {
 pg, ok := obj.(*spec.Postgresql)
@@ -63,6 +83,8 @@ func (c *Controller) clusterListFunc(options api.ListOptions) (runtime.Object, error) {
 c.logger.Infof("No clusters running")
 }
+atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix())
 return object, err
 }
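
The two additions in this file work together: `clusterResync` wakes up every `ResyncPeriod` and calls `clusterListFunc`, while the atomically maintained `lastClusterSyncTime` lets the list function skip a resync that would land too soon after the previous one. A condensed, runnable sketch of that mechanism, with a placeholder struct instead of the real `Controller` and plain logging instead of the Kubernetes list call:

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

// resyncer is a stand-in for the Controller: just the resync period and the
// atomically updated timestamp of the last completed sync.
type resyncer struct {
	resyncPeriod        time.Duration
	lastClusterSyncTime int64
}

// listClusters plays the role of clusterListFunc: skip if the previous sync
// is still fresh, otherwise do the work and record the time.
func (r *resyncer) listClusters() {
	if time.Now().Unix()-atomic.LoadInt64(&r.lastClusterSyncTime) <= int64(r.resyncPeriod.Seconds()) {
		log.Println("skipping resync of clusters")
		return
	}
	log.Println("listing clusters") // the real code lists the Postgresql TPR objects here
	atomic.StoreInt64(&r.lastClusterSyncTime, time.Now().Unix())
}

// clusterResync mirrors the new goroutine: tick until the stop channel closes.
func (r *resyncer) clusterResync(stopCh <-chan struct{}) {
	ticker := time.NewTicker(r.resyncPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			r.listClusters()
		case <-stopCh:
			return
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	r := &resyncer{resyncPeriod: time.Second}
	go r.clusterResync(stopCh)
	time.Sleep(3 * time.Second) // let a few ticks fire, then shut down
	close(stopCh)
}
```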

View File

@@ -135,12 +135,12 @@ func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) {
 return []byte(fmt.Sprintf("\"%s-%s\"",
 m.StartTime.Format("15:04"),
 m.EndTime.Format("15:04"))), nil
-} else {
-return []byte(fmt.Sprintf("\"%s:%s-%s\"",
-m.Weekday.String()[:3],
-m.StartTime.Format("15:04"),
-m.EndTime.Format("15:04"))), nil
 }
+return []byte(fmt.Sprintf("\"%s:%s-%s\"",
+m.Weekday.String()[:3],
+m.StartTime.Format("15:04"),
+m.EndTime.Format("15:04"))), nil
 }
 // UnmarshalJSON convets a JSON to the maintenance window definition.
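
The MarshalJSON change drops the `else` branch that followed an unconditional `return`, flattening the control flow in the way golint suggests. A trimmed sketch of the resulting shape; the `window` type and its `Everyday` field are assumptions used only to give the condition something to test, not the operator's actual definition:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// window is a cut-down stand-in for the maintenance window type.
type window struct {
	Everyday  bool
	Weekday   time.Weekday
	StartTime time.Time
	EndTime   time.Time
}

func (m window) MarshalJSON() ([]byte, error) {
	if m.Everyday {
		return []byte(fmt.Sprintf("%q", m.StartTime.Format("15:04")+"-"+m.EndTime.Format("15:04"))), nil
	}
	// No else needed: after the early return this is the only remaining path.
	return []byte(fmt.Sprintf("%q",
		m.Weekday.String()[:3]+":"+m.StartTime.Format("15:04")+"-"+m.EndTime.Format("15:04"))), nil
}

func main() {
	w := window{
		Weekday:   time.Monday,
		StartTime: time.Date(0, 1, 1, 1, 0, 0, 0, time.UTC),
		EndTime:   time.Date(0, 1, 1, 3, 0, 0, 0, time.UTC),
	}
	out, _ := json.Marshal(w)
	fmt.Println(string(out)) // prints "Mon:01:00-03:00" (a JSON string, quotes included)
}
```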

View File

@@ -282,7 +282,7 @@ var postgresqlList = []struct {
 Kind: "List",
 APIVersion: "v1",
 },
-Items: []Postgresql{Postgresql{
+Items: []Postgresql{{
 TypeMeta: unversioned.TypeMeta{
 Kind: "Postgresql",
 APIVersion: "acid.zalan.do/v1",
@@ -382,7 +382,7 @@ func TestUnmarshalMaintenanceWindow(t *testing.T) {
 }
 if !reflect.DeepEqual(m, tt.out) {
-t.Errorf("Expected maintenace window: %#v, got: %#v", tt.out, m)
+t.Errorf("Expected maintenance window: %#v, got: %#v", tt.out, m)
 }
 }
 }

View File

@@ -15,7 +15,6 @@ type TPR struct {
 }
 type Resources struct {
-ResyncPeriodPod time.Duration `name:"resync_period_pod" default:"5m"`
 ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"`
 ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"`
 PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`

View File

@@ -10,4 +10,7 @@ const (
 K8sAPIPath = "/api"
 StatefulsetDeletionInterval = 1 * time.Second
 StatefulsetDeletionTimeout = 30 * time.Second
+QueueResyncPeriodPod = 5 * time.Minute
+QueueResyncPeriodTPR = 5 * time.Minute
 )

View File

@@ -25,13 +25,6 @@ var pgUsers = []struct {
 MemberOf: []string{}},
 "md592f413f3974bdf3799bb6fecb5f9f2c6"}}
-var prettyTest = []struct {
-in interface{}
-out string
-}{
-{pgUsers, `[{{test password [] []} md587f77988ccb5aa917c93201ba314fcd4} {{test md592f413f3974bdf3799bb6fecb5f9f2c6 [] []} md592f413f3974bdf3799bb6fecb5f9f2c6}]`},
-}
 var prettyDiffTest = []struct {
 inA interface{}
 inB interface{}