Use queues for the pod events (#30)
commit 009db16c7c
parent 132c8425e6
@@ -17,6 +17,7 @@ import (
 	"k8s.io/client-go/pkg/apis/apps/v1beta1"
 	"k8s.io/client-go/pkg/types"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
 
 	"github.com/zalando-incubator/postgres-operator/pkg/spec"
 	"github.com/zalando-incubator/postgres-operator/pkg/util"
@@ -54,18 +55,17 @@ type Cluster struct {
 	kubeResources
 	spec.Postgresql
 	Config
-	logger               *logrus.Entry
-	pgUsers              map[string]spec.PgUser
-	systemUsers          map[string]spec.PgUser
-	podEvents            chan spec.PodEvent
-	podSubscribers       map[spec.NamespacedName]chan spec.PodEvent
-	podSubscribersMu     sync.RWMutex
-	pgDb                 *sql.DB
-	mu                   sync.Mutex
-	masterLess           bool
-	podDispatcherRunning bool
-	userSyncStrategy     spec.UserSyncer
-	deleteOptions        *v1.DeleteOptions
+	logger           *logrus.Entry
+	pgUsers          map[string]spec.PgUser
+	systemUsers      map[string]spec.PgUser
+	podSubscribers   map[spec.NamespacedName]chan spec.PodEvent
+	podSubscribersMu sync.RWMutex
+	pgDb             *sql.DB
+	mu               sync.Mutex
+	masterLess       bool
+	userSyncStrategy spec.UserSyncer
+	deleteOptions    *v1.DeleteOptions
+	podEventsQueue   *cache.FIFO
 }
 
 func New(cfg Config, pgSpec spec.Postgresql, logger *logrus.Entry) *Cluster {
@@ -73,19 +73,27 @@ func New(cfg Config, pgSpec spec.Postgresql, logger *logrus.Entry) *Cluster {
 	kubeResources := kubeResources{Secrets: make(map[types.UID]*v1.Secret)}
 	orphanDependents := true
 
+	podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) {
+		e, ok := obj.(spec.PodEvent)
+		if !ok {
+			return "", fmt.Errorf("could not cast to PodEvent")
+		}
+
+		return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
+	})
+
 	cluster := &Cluster{
-		Config:               cfg,
-		Postgresql:           pgSpec,
-		logger:               lg,
-		pgUsers:              make(map[string]spec.PgUser),
-		systemUsers:          make(map[string]spec.PgUser),
-		podEvents:            make(chan spec.PodEvent),
-		podSubscribers:       make(map[spec.NamespacedName]chan spec.PodEvent),
-		kubeResources:        kubeResources,
-		masterLess:           false,
-		podDispatcherRunning: false,
-		userSyncStrategy:     users.DefaultUserSyncStrategy{},
-		deleteOptions:        &v1.DeleteOptions{OrphanDependents: &orphanDependents},
+		Config:           cfg,
+		Postgresql:       pgSpec,
+		logger:           lg,
+		pgUsers:          make(map[string]spec.PgUser),
+		systemUsers:      make(map[string]spec.PgUser),
+		podSubscribers:   make(map[spec.NamespacedName]chan spec.PodEvent),
+		kubeResources:    kubeResources,
+		masterLess:       false,
+		userSyncStrategy: users.DefaultUserSyncStrategy{},
+		deleteOptions:    &v1.DeleteOptions{OrphanDependents: &orphanDependents},
+		podEventsQueue:   podEventsQueue,
 	}
 
 	return cluster
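
For readers unfamiliar with `cache.FIFO`: the queue holds at most one item per key, so keying by pod name plus resource version (as above) collapses repeated deliveries of the same pod revision into a single queue entry while distinct revisions stay queued in order. Below is a minimal, self-contained sketch of that behaviour; the `demoEvent` type and the literal pod names are illustrative stand-ins, not the operator's own types.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// demoEvent stands in for spec.PodEvent; the fields are illustrative.
type demoEvent struct {
	PodName         string
	ResourceVersion string
}

func main() {
	// Key the FIFO by name and resource version, mirroring the key function in New().
	queue := cache.NewFIFO(func(obj interface{}) (string, error) {
		e, ok := obj.(demoEvent)
		if !ok {
			return "", fmt.Errorf("could not cast to demoEvent")
		}
		return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
	})

	// Adding the same key twice leaves a single entry in the queue.
	queue.Add(demoEvent{PodName: "db-0", ResourceVersion: "100"})
	queue.Add(demoEvent{PodName: "db-0", ResourceVersion: "100"})
	queue.Add(demoEvent{PodName: "db-0", ResourceVersion: "101"})

	fmt.Println(len(queue.List())) // 2: the duplicate "db-0-100" event was collapsed
}
```
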
@@ -143,16 +151,11 @@ func (c *Cluster) initUsers() error {
 	return nil
 }
 
-func (c *Cluster) Create(stopCh <-chan struct{}) error {
+func (c *Cluster) Create() error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	var err error
 
-	if !c.podDispatcherRunning {
-		go c.podEventsDispatcher(stopCh)
-		c.podDispatcherRunning = true
-	}
-
 	defer func() {
 		if err == nil {
 			c.setStatus(spec.ClusterStatusRunning) //TODO: are you sure it's running?
@@ -460,7 +463,38 @@ func (c *Cluster) Delete() error {
 }
 
 func (c *Cluster) ReceivePodEvent(event spec.PodEvent) {
-	c.podEvents <- event
+	c.podEventsQueue.Add(event)
+}
+
+func (c *Cluster) processPodEvent(obj interface{}) error {
+	event, ok := obj.(spec.PodEvent)
+	if !ok {
+		return fmt.Errorf("could not cast to PodEvent")
+	}
+
+	c.podSubscribersMu.RLock()
+	subscriber, ok := c.podSubscribers[event.PodName]
+	c.podSubscribersMu.RUnlock()
+	if ok {
+		subscriber <- event
+	}
+
+	return nil
+}
+
+func (c *Cluster) Run(stopCh <-chan struct{}) {
+	go c.processPodEventQueue(stopCh)
+}
+
+func (c *Cluster) processPodEventQueue(stopCh <-chan struct{}) {
+	for {
+		select {
+		case <-stopCh:
+			return
+		default:
+			c.podEventsQueue.Pop(cache.PopProcessFunc(c.processPodEvent))
+		}
+	}
 }
 
 func (c *Cluster) initSystemUsers() {
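`Pop` blocks until the queue is non-empty, then hands the oldest item to the supplied `cache.PopProcessFunc` and removes it (it only requeues if the function explicitly asks for a requeue), which is why `processPodEventQueue` can loop without sleeping. A hedged sketch of that contract in isolation, using throwaway string items rather than the operator's `spec.PodEvent`:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Key each item by its own string value; purely illustrative.
	queue := cache.NewFIFO(func(obj interface{}) (string, error) {
		s, ok := obj.(string)
		if !ok {
			return "", fmt.Errorf("could not cast to string")
		}
		return s, nil
	})

	queue.Add("pgcluster-0-100")
	queue.Add("pgcluster-1-200")

	// Drain the queue: Pop hands each item to the process function
	// and returns whatever error that function returned.
	for len(queue.List()) > 0 {
		if _, err := queue.Pop(cache.PopProcessFunc(func(obj interface{}) error {
			fmt.Println("processing", obj)
			return nil
		})); err != nil {
			fmt.Println("pop failed:", err)
		}
	}
}
```
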
@@ -146,23 +146,6 @@ func (c *Cluster) recreatePod(pod v1.Pod) error {
 	return nil
 }
 
-func (c *Cluster) podEventsDispatcher(stopCh <-chan struct{}) {
-	c.logger.Infof("Watching '%s' cluster", c.ClusterName())
-	for {
-		select {
-		case event := <-c.podEvents:
-			c.podSubscribersMu.RLock()
-			subscriber, ok := c.podSubscribers[event.PodName]
-			c.podSubscribersMu.RUnlock()
-			if ok {
-				go func() { subscriber <- event }() //TODO: is it a right way to do nonblocking send to the channel?
-			}
-		case <-stopCh:
-			return
-		}
-	}
-}
-
 func (c *Cluster) recreatePods() error {
 	ls := c.labelsSet()
 	namespace := c.Metadata.Namespace
@@ -7,7 +7,7 @@ import (
 	"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
 )
 
-func (c *Cluster) Sync(stopCh <-chan struct{}) error {
+func (c *Cluster) Sync() error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
@@ -16,11 +16,6 @@ func (c *Cluster) Sync(stopCh <-chan struct{}) error {
 		c.logger.Errorf("could not load resources: %v", err)
 	}
 
-	if !c.podDispatcherRunning {
-		go c.podEventsDispatcher(stopCh)
-		c.podDispatcherRunning = true
-	}
-
 	c.logger.Debugf("Syncing secrets")
 	if err := c.syncSecrets(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {
@@ -62,10 +62,11 @@ func (c *Controller) podAdd(obj interface{}) {
 	}
 
 	podEvent := spec.PodEvent{
-		ClusterName: c.PodClusterName(pod),
-		PodName:     util.NameFromMeta(pod.ObjectMeta),
-		CurPod:      pod,
-		EventType:   spec.EventAdd,
+		ClusterName:     c.PodClusterName(pod),
+		PodName:         util.NameFromMeta(pod.ObjectMeta),
+		CurPod:          pod,
+		EventType:       spec.EventAdd,
+		ResourceVersion: pod.ResourceVersion,
 	}
 
 	c.podCh <- podEvent
@@ -83,11 +84,12 @@ func (c *Controller) podUpdate(prev, cur interface{}) {
 	}
 
 	podEvent := spec.PodEvent{
-		ClusterName: c.PodClusterName(curPod),
-		PodName:     util.NameFromMeta(curPod.ObjectMeta),
-		PrevPod:     prevPod,
-		CurPod:      curPod,
-		EventType:   spec.EventUpdate,
+		ClusterName:     c.PodClusterName(curPod),
+		PodName:         util.NameFromMeta(curPod.ObjectMeta),
+		PrevPod:         prevPod,
+		CurPod:          curPod,
+		EventType:       spec.EventUpdate,
+		ResourceVersion: curPod.ResourceVersion,
 	}
 
 	c.podCh <- podEvent
@@ -100,27 +102,28 @@ func (c *Controller) podDelete(obj interface{}) {
 	}
 
 	podEvent := spec.PodEvent{
-		ClusterName: c.PodClusterName(pod),
-		PodName:     util.NameFromMeta(pod.ObjectMeta),
-		CurPod:      pod,
-		EventType:   spec.EventDelete,
+		ClusterName:     c.PodClusterName(pod),
+		PodName:         util.NameFromMeta(pod.ObjectMeta),
+		CurPod:          pod,
+		EventType:       spec.EventDelete,
+		ResourceVersion: pod.ResourceVersion,
 	}
 
 	c.podCh <- podEvent
 }
 
 func (c *Controller) podEventsDispatcher(stopCh <-chan struct{}) {
-	c.logger.Infof("Watching all pod events")
+	c.logger.Debugln("Watching all pod events")
 	for {
 		select {
 		case event := <-c.podCh:
 			c.clustersMu.RLock()
-			subscriber, ok := c.clusters[event.ClusterName]
+			cluster, ok := c.clusters[event.ClusterName]
 			c.clustersMu.RUnlock()
 
 			if ok {
 				c.logger.Debugf("Sending %s event of pod '%s' to the '%s' cluster channel", event.EventType, event.PodName, event.ClusterName)
-				go subscriber.ReceivePodEvent(event)
+				cluster.ReceivePodEvent(event)
 			}
 		case <-stopCh:
 			return
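The dispatcher can now call `ReceivePodEvent` synchronously: `FIFO.Add` records the item under a lock and returns, whereas the old unbuffered `podEvents` channel made every send wait for a receiver, which is why the previous code wrapped each send in a goroutine. A small sketch contrasting the two hand-off styles; the sleep, channel, and key function are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Unbuffered channel: the send blocks until some receiver is ready.
	events := make(chan string)
	go func() {
		time.Sleep(100 * time.Millisecond) // simulate a slow consumer
		fmt.Println("received:", <-events)
	}()
	start := time.Now()
	events <- "pod event" // waits ~100ms for the receiver above
	fmt.Println("channel send took", time.Since(start))

	// cache.FIFO: Add stores the item and returns immediately,
	// even though nothing is consuming the queue yet.
	queue := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(string), nil
	})
	start = time.Now()
	queue.Add("pod event")
	fmt.Println("queue add took", time.Since(start))
}
```
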
@@ -91,7 +91,6 @@ func (c *Controller) processEvent(obj interface{}) error {
 
 	c.clustersMu.RLock()
 	cl, clusterFound := c.clusters[clusterName]
-	stopCh := c.stopChs[clusterName]
 	c.clustersMu.RUnlock()
 
 	switch event.EventType {
@@ -105,13 +104,14 @@ func (c *Controller) processEvent(obj interface{}) error {
 
 		stopCh := make(chan struct{})
 		cl = cluster.New(c.makeClusterConfig(), *event.NewSpec, logger)
+		cl.Run(stopCh)
 
 		c.clustersMu.Lock()
 		c.clusters[clusterName] = cl
 		c.stopChs[clusterName] = stopCh
 		c.clustersMu.Unlock()
 
-		if err := cl.Create(stopCh); err != nil {
+		if err := cl.Create(); err != nil {
 			cl.Error = fmt.Errorf("could not create cluster: %v", err)
 			logger.Errorf("%v", cl.Error)
 
@@ -158,8 +158,9 @@ func (c *Controller) processEvent(obj interface{}) error {
 
 		// no race condition because a cluster is always processed by single worker
 		if !clusterFound {
+			stopCh := make(chan struct{})
 			cl = cluster.New(c.makeClusterConfig(), *event.NewSpec, logger)
-			stopCh = make(chan struct{})
+			cl.Run(stopCh)
 
 			c.clustersMu.Lock()
 			c.clusters[clusterName] = cl
@@ -167,7 +168,7 @@ func (c *Controller) processEvent(obj interface{}) error {
 			c.clustersMu.Unlock()
 		}
 
-		if err := cl.Sync(stopCh); err != nil {
+		if err := cl.Sync(); err != nil {
 			cl.Error = fmt.Errorf("could not sync cluster '%s': %s", clusterName, err)
 			logger.Errorf("%v", cl)
 			return nil
@@ -34,11 +34,12 @@ const (
 )
 
 type PodEvent struct {
-	ClusterName NamespacedName
-	PodName     NamespacedName
-	PrevPod     *v1.Pod
-	CurPod      *v1.Pod
-	EventType   EventType
+	ResourceVersion string
+	ClusterName     NamespacedName
+	PodName         NamespacedName
+	PrevPod         *v1.Pod
+	CurPod          *v1.Pod
+	EventType       EventType
 }
 
 type PgUser struct {