fix stream duplication on operator restart (#2733)
* fix stream duplication on operator restart
* add try except to streams e2e test
parent c7ee34ed12
commit 2f7e3ee847
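Background for the diff below: after an operator restart the in-memory `c.Streams` map starts out empty, so `syncStream` could no longer see an already existing FabricEventStream and created a second one for the same `applicationId`. The fix lists FabricEventStream resources from the Kubernetes API by label selector and, while syncing, keeps the first stream matching the `applicationId` and deletes any further matches as redundant. The following is a minimal, self-contained sketch of that keep-first/delete-the-rest idea; `EventStream` and `dedupeByApplicationId` are simplified stand-ins, not the operator's real CRD types or client calls.

```go
// Minimal sketch of the keep-first / delete-the-rest pattern behind the fix.
// EventStream and dedupeByApplicationId are hypothetical, simplified stand-ins.
package main

import "fmt"

type EventStream struct {
	Name          string
	ApplicationId string
}

// dedupeByApplicationId keeps the first stream matching appId and returns the
// names of redundant duplicates that a real controller would delete via the API.
func dedupeByApplicationId(streams []EventStream, appId string) (kept *EventStream, redundant []string) {
	for i := range streams {
		if streams[i].ApplicationId != appId {
			continue
		}
		if kept != nil {
			redundant = append(redundant, streams[i].Name)
			continue
		}
		kept = &streams[i]
	}
	return kept, redundant
}

func main() {
	streams := []EventStream{
		{Name: "acid-test-cluster-12345", ApplicationId: "test-app"},
		{Name: "acid-test-cluster-67890", ApplicationId: "test-app"}, // duplicate left over from a restart
		{Name: "acid-other-cluster-11111", ApplicationId: "other-app"},
	}
	kept, redundant := dedupeByApplicationId(streams, "test-app")
	fmt.Printf("kept %q, would delete %v\n", kept.Name, redundant)
}
```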
@@ -2131,6 +2131,8 @@ class EndToEndTestCase(unittest.TestCase):
             verbs=["create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"]
         )
         cluster_role.rules.append(fes_cluster_role_rule)
+
+        try:
             k8s.api.rbac_api.patch_cluster_role("postgres-operator", cluster_role)
 
             # create a table in one of the database of acid-minimal-cluster
@@ -2256,6 +2258,10 @@ class EndToEndTestCase(unittest.TestCase):
             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_nonstream_publication_query)), 1,
                 "Publication defined not in stream section is deleted", 10, 5)
 
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
         '''
@@ -433,23 +433,46 @@ func hasSlotsInSync(appId string, databaseSlots map[string]map[string]zalandov1.
 }
 
 func (c *Cluster) syncStream(appId string) error {
+	var (
+		streams *zalandov1.FabricEventStreamList
+		err     error
+	)
+	c.setProcessName("syncing stream with applicationId %s", appId)
+	c.logger.Debugf("syncing stream with applicationId %s", appId)
+
+	listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()}
+	streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
+	if err != nil {
+		return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err)
+	}
+
 	streamExists := false
-	// update stream when it exists and EventStreams array differs
-	for _, stream := range c.Streams {
-		if appId == stream.Spec.ApplicationId {
+	for _, stream := range streams.Items {
+		if stream.Spec.ApplicationId != appId {
+			continue
+		}
+		if streamExists {
+			c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId)
+			if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
+				c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err)
+			} else {
+				c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId)
+			}
+			continue
+		}
 		streamExists = true
 		desiredStreams := c.generateFabricEventStream(appId)
 		if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
 			c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId)
 			stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences
 			c.setProcessName("updating event streams with applicationId %s", appId)
-				stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), stream, metav1.UpdateOptions{})
+			stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
 			if err != nil {
 				return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err)
 			}
 			c.Streams[appId] = stream
 		}
-			if match, reason := c.compareStreams(stream, desiredStreams); !match {
+		if match, reason := c.compareStreams(&stream, desiredStreams); !match {
 			c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
 			desiredStreams.ObjectMeta = stream.ObjectMeta
 			updatedStream, err := c.updateStreams(desiredStreams)
@@ -459,8 +482,6 @@ func (c *Cluster) syncStream(appId string) error {
 			c.Streams[appId] = updatedStream
 			c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
 		}
-			continue
-		}
 	}
 
 	if !streamExists {
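The new List call in `syncStream` scopes the lookup with a label selector built from the cluster's own labels (`c.labelsSet(true).String()`), so only FabricEventStream objects belonging to this cluster are considered; the new `TestSyncStreams` test below reuses the same selector to count the remaining streams after a sync. For illustration, a small sketch of how such a selector and `ListOptions` can be assembled with the standard `k8s.io/apimachinery` packages; the label keys and the cluster name here are example values, not the operator's actual configuration.

```go
// Sketch of building a label selector for the List call, assuming the standard
// k8s.io/apimachinery packages; label keys and values are illustrative only.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	clusterName := "acid-test-cluster" // hypothetical cluster name
	selector := labels.Set(map[string]string{
		"application":  "spilo",
		"cluster-name": clusterName,
	}).String()

	listOptions := metav1.ListOptions{LabelSelector: selector}
	fmt.Println(listOptions.LabelSelector) // e.g. "application=spilo,cluster-name=acid-test-cluster"
}
```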
@@ -2,6 +2,7 @@ package cluster
 
 import (
 	"fmt"
+	"reflect"
 	"strings"
 
 	"context"
@@ -87,6 +88,11 @@ var (
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      fmt.Sprintf("%s-12345", clusterName),
 			Namespace: namespace,
+			Labels: map[string]string{
+				"application":  "spilo",
+				"cluster-name": fmt.Sprintf("%s-2", clusterName),
+				"team":         "acid",
+			},
 			OwnerReferences: []metav1.OwnerReference{
 				metav1.OwnerReference{
 					APIVersion: "apps/v1",
@@ -432,12 +438,8 @@ func TestGenerateFabricEventStream(t *testing.T) {
 	cluster.Name = clusterName
 	cluster.Namespace = namespace
 
-	// create statefulset to have ownerReference for streams
-	_, err := cluster.createStatefulSet()
-	assert.NoError(t, err)
-
 	// create the streams
-	err = cluster.syncStream(appId)
+	err := cluster.syncStream(appId)
 	assert.NoError(t, err)
 
 	// compare generated stream with expected stream
@@ -451,11 +453,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
 	}
 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
-
-	// check if there is only one stream
-	if len(streams.Items) > 1 {
-		t.Errorf("too many stream CRDs found: got %d, but expected only one", len(streams.Items))
-	}
+	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items))
 
 	// compare stream returned from API with expected stream
 	if match, _ := cluster.compareStreams(&streams.Items[0], fes); !match {
@@ -468,11 +466,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
 
 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
-
-	// check if there is still only one stream
-	if len(streams.Items) > 1 {
-		t.Errorf("too many stream CRDs found after sync: got %d, but expected only one", len(streams.Items))
-	}
+	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items))
 
 	// compare stream resturned from API with generated stream
 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
@@ -493,6 +487,62 @@ func newFabricEventStream(streams []zalandov1.EventStream, annotations map[strin
 	}
 }
 
+func TestSyncStreams(t *testing.T) {
+	pg.Name = fmt.Sprintf("%s-2", pg.Name)
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					EnableOwnerReferences: util.True(),
+					PodRoleLabel:          "spilo-role",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	_, err := cluster.KubeClient.Postgresqls(namespace).Create(
+		context.TODO(), &pg, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	// create the stream
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	// create a second stream with same spec but with different name
+	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
+		context.TODO(), fes, metav1.CreateOptions{})
+	assert.NoError(t, err)
+	assert.Equal(t, createdStream.Spec.ApplicationId, appId)
+
+	// check that two streams exist
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(true).String(),
+	}
+	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items))
+
+	// sync the stream which should remove the redundant stream
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	// check that only one stream remains after sync
+	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
+
+	// check owner references
+	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
+		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
+	}
+}
+
 func TestSameStreams(t *testing.T) {
 	testName := "TestSameStreams"
 	annotationsA := map[string]string{"owned-by": "acid"}
@@ -606,8 +656,8 @@ func TestSameStreams(t *testing.T) {
 	}
 }
 
-func TestUpdateFabricEventStream(t *testing.T) {
-	pg.Name = fmt.Sprintf("%s-2", pg.Name)
+func TestUpdateStreams(t *testing.T) {
+	pg.Name = fmt.Sprintf("%s-3", pg.Name)
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{
@@ -628,11 +678,7 @@ func TestUpdateFabricEventStream(t *testing.T) {
 		context.TODO(), &pg, metav1.CreateOptions{})
 	assert.NoError(t, err)
 
-	// create statefulset to have ownerReference for streams
-	_, err = cluster.createStatefulSet()
-	assert.NoError(t, err)
-
-	// now create the stream
+	// create the stream
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)
 
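The new `TestSyncStreams` test finishes by comparing the surviving stream's owner references against `cluster.ownerReferences()` with `reflect.DeepEqual`, which is why the `"reflect"` import is added. A tiny sketch of that comparison style follows, using the real `metav1.OwnerReference` type but purely illustrative values; the operator's actual `ownerReferences()` helper is not reproduced here.

```go
// Sketch of the owner-reference check style used by the new test; the values
// below are illustrative, not taken from the operator's code.
package main

import (
	"fmt"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	controller := true
	want := []metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "StatefulSet",
		Name:       "acid-test-cluster",
		Controller: &controller,
	}}
	// in the real test, got comes from the FabricEventStream returned by the fake client
	got := []metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "StatefulSet",
		Name:       "acid-test-cluster",
		Controller: &controller,
	}}

	if !reflect.DeepEqual(got, want) {
		fmt.Printf("unexpected owner references, expected %#v, got %#v\n", want, got)
	} else {
		fmt.Println("owner references match")
	}
}
```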