remove streamType field

This commit is contained in:
Felix Kunde 2021-09-22 11:47:31 +02:00
parent a17d63088b
commit 223ffa75ef
10 changed files with 40 additions and 96 deletions

View File

@ -477,6 +477,9 @@ spec:
nullable: true nullable: true
items: items:
type: object type: object
required:
- database
- tables
properties: properties:
batchSize: batchSize:
type: integer type: integer
@ -490,11 +493,6 @@ spec:
type: object type: object
additionalProperties: additionalProperties:
type: string type: string
streamType:
type: string
enum:
- "nakadi"
- "wal"
teamId: teamId:
type: string type: string
tls: tls:

View File

@ -522,27 +522,24 @@ under the `streams` top-level key will be used by the operator to create a
CRD for Zalando's internal CDC operator named like the Postgres cluster. CRD for Zalando's internal CDC operator named like the Postgres cluster.
Each stream object can have the following properties: Each stream object can have the following properties:
* **streamType**
Defines the stream flow. Choose `nakadi` when you want to specify certain
nakadi event types or `wal` if changes should be mapped to a generic
event type. Default is `wal`.
* **database** * **database**
Name of the database from where events will be published via Postgres' Name of the database from where events will be published via Postgres'
logical decoding feature. The operator will take care of updating the logical decoding feature. The operator will take care of updating the
database configuration (setting `wal_level: logical`, creating logical database configuration (setting `wal_level: logical`, creating logical
replication slots, using output plugin `wal2json` and creating a dedicated replication slots, using output plugin `wal2json` and creating a dedicated
replication user). replication user). Required.
* **tables** * **tables**
Defines a map of table names and event types. The CDC operator is following Defines a map of table names and event types. The CDC operator is following
the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/) the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/)
meaning changes are only consumed from an extra table that already has the meaning changes are only consumed from an extra table that already has the
structure of the event in the target sink. The operator will assume that this structure of the event in the target sink. The operator will assume that this
outbox table is called like `<table>_<event_type>_outbox`. outbox table is called like `<table>_<event_type>_outbox`. Required.
* **filter** * **filter**
Streamed events can be filtered by a jsonpath expression for each table. Streamed events can be filtered by a jsonpath expression for each table.
Optional.
* **batchSize** * **batchSize**
Defines the size of batches in which events are consumed. Defines the size of batches in which events are consumed. Optional.
Defaults to 1.

View File

@ -198,15 +198,11 @@ spec:
# Enables change data capture streams for defined database tables # Enables change data capture streams for defined database tables
# streams: # streams:
# - streamType: nakadi # - database: foo
# batchSize: 100
# database: foo
# tables: # tables:
# ta: event_type_a # data.ta: event_type_a
# tb: event_type_b # data.tb: event_type_b
# - streamType: wal # # Optional. Filter ignores events before a certain txnId and lsn. Can be used to skip bad events
# batchSize: 100 # filter:
# database: foo # data.ta: "[?(@.source.txId > 500 && @.source.lsn > 123456)]"
# tables: # batchSize: 1000
# public.tx: event_type_a
# public.ty: event_type_b

View File

@ -473,6 +473,9 @@ spec:
nullable: true nullable: true
items: items:
type: object type: object
required:
- database
- tables
properties: properties:
batchSize: batchSize:
type: integer type: integer
@ -486,11 +489,6 @@ spec:
type: object type: object
additionalProperties: additionalProperties:
type: string type: string
streamType:
type: string
enum:
- "nakadi"
- "wal"
teamId: teamId:
type: string type: string
tls: tls:

View File

@ -664,7 +664,8 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "array", Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{ Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{ Schema: &apiextv1.JSONSchemaProps{
Type: "object", Type: "object",
Required: []string{"database", "tables"},
Properties: map[string]apiextv1.JSONSchemaProps{ Properties: map[string]apiextv1.JSONSchemaProps{
"batchSize": { "batchSize": {
Type: "integer", Type: "integer",
@ -688,17 +689,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"streamType": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"nakadi"`),
},
{
Raw: []byte(`"wal"`),
},
},
},
}, },
}, },
}, },

View File

@ -229,9 +229,8 @@ type ConnectionPooler struct {
} }
type Stream struct { type Stream struct {
StreamType string `json:"streamType,omitempty"` Database string `json:"database"`
Database string `json:"database,omitempty"` Tables map[string]string `json:"tables"`
Tables map[string]string `json:"tables,omitempty"` Filter map[string]string `json:"filter,omitempty"`
Filter map[string]string `json:"filter,omitempty"` BatchSize uint32 `json:"batchSize,omitempty"`
BatchSize uint32 `json:"batchSize,omitempty"`
} }

View File

@ -40,12 +40,8 @@ type EventStream struct {
// EventStreamFlow defines the flow characteristics of the event stream // EventStreamFlow defines the flow characteristics of the event stream
type EventStreamFlow struct { type EventStreamFlow struct {
Type string `json:"type"` Type string `json:"type"`
DataTypeColumn string `json:"dataTypeColumn,omitempty"` PayloadColumn string `json:"payloadColumn,omitempty" defaults:"payload"`
DataOpColumn string `json:"dataOpColumn,omitempty"`
MetadataColumn string `json:"metadataColumn,omitempty"`
DataColumn string `json:"dataColumn,omitempty"`
PayloadColumn string `json:"payloadColumn,omitempty"`
} }
// EventStreamSink defines the target of the event stream // EventStreamSink defines the target of the event stream
@ -67,7 +63,7 @@ type EventStreamSource struct {
// EventStreamTable defines the name and ID column to be used for streaming // EventStreamTable defines the name and ID column to be used for streaming
type EventStreamTable struct { type EventStreamTable struct {
Name string `json:"name"` Name string `json:"name"`
IDColumn string `json:"idColumn,omitempty"` IDColumn string `json:"idColumn,omitempty" defaults:"id"`
} }
// Connection to be used for allowing the FES operator to connect to a database // Connection to be used for allowing the FES operator to connect to a database

View File

@ -150,23 +150,9 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, table, eventType st
} }
func getEventStreamFlow(stream acidv1.Stream) zalandov1alpha1.EventStreamFlow { func getEventStreamFlow(stream acidv1.Stream) zalandov1alpha1.EventStreamFlow {
switch stream.StreamType { return zalandov1alpha1.EventStreamFlow{
case "nakadi": Type: constants.EventStreamFlowPgGenericType,
return zalandov1alpha1.EventStreamFlow{
Type: constants.EventStreamFlowPgNakadiType,
DataTypeColumn: constants.EventStreamFlowDataTypeColumn,
DataOpColumn: constants.EventStreamFlowDataOpColumn,
MetadataColumn: constants.EventStreamFlowMetadataColumn,
DataColumn: constants.EventStreamFlowDataColumn,
}
case "wal":
return zalandov1alpha1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
PayloadColumn: constants.EventStreamFlowPayloadColumn,
}
} }
return zalandov1alpha1.EventStreamFlow{}
} }
func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1alpha1.EventStreamSink { func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1alpha1.EventStreamSink {
@ -190,8 +176,7 @@ func getTableSchema(fullTableName string) (tableName, schemaName string) {
func getOutboxTable(tableName, eventType string) zalandov1alpha1.EventStreamTable { func getOutboxTable(tableName, eventType string) zalandov1alpha1.EventStreamTable {
return zalandov1alpha1.EventStreamTable{ return zalandov1alpha1.EventStreamTable{
Name: outboxTableNameTemplate.Format("table", tableName, "eventtype", eventType), Name: outboxTableNameTemplate.Format("table", tableName, "eventtype", eventType),
IDColumn: "id",
} }
} }
@ -236,15 +221,16 @@ func (c *Cluster) syncStreams() error {
c.logger.Infof("event streams do not exist, create it") c.logger.Infof("event streams do not exist, create it")
err := c.createStreams() err := c.createStreams()
if err != nil { if err != nil {
return fmt.Errorf("event stream creation failed: %v", err) return fmt.Errorf("event streams creation failed: %v", err)
} }
} else { } else {
desiredStreams := c.generateFabricEventStream() desiredStreams := c.generateFabricEventStream()
if !reflect.DeepEqual(effectiveStreams.Spec, desiredStreams.Spec) { if !reflect.DeepEqual(effectiveStreams.Spec, desiredStreams.Spec) {
c.logger.Debug("updating event streams")
desiredStreams.ObjectMeta.ResourceVersion = effectiveStreams.ObjectMeta.ResourceVersion desiredStreams.ObjectMeta.ResourceVersion = effectiveStreams.ObjectMeta.ResourceVersion
err = c.updateStreams(desiredStreams) err = c.updateStreams(desiredStreams)
if err != nil { if err != nil {
return fmt.Errorf("event stream update failed: %v", err) return fmt.Errorf("event streams update failed: %v", err)
} }
} }
} }

View File

@ -47,16 +47,7 @@ var (
}, },
Streams: []acidv1.Stream{ Streams: []acidv1.Stream{
{ {
StreamType: "nakadi", Database: "foo",
Database: "foo",
Tables: map[string]string{
"bar": "stream_type_a",
},
BatchSize: uint32(100),
},
{
StreamType: "wal",
Database: "foo",
Tables: map[string]string{ Tables: map[string]string{
"bar": "stream_type_a", "bar": "stream_type_a",
}, },
@ -132,8 +123,7 @@ func TestUpdateFabricEventStream(t *testing.T) {
var pgSpec acidv1.PostgresSpec var pgSpec acidv1.PostgresSpec
pgSpec.Streams = []acidv1.Stream{ pgSpec.Streams = []acidv1.Stream{
{ {
StreamType: "nakadi", Database: "foo",
Database: "foo",
Tables: map[string]string{ Tables: map[string]string{
"bar": "stream_type_b", "bar": "stream_type_b",
}, },

View File

@ -2,15 +2,9 @@ package constants
// PostgreSQL specific constants // PostgreSQL specific constants
const ( const (
EventStreamSourcePGType = "PostgresLogicalReplication" EventStreamSourcePGType = "PostgresLogicalReplication"
EventStreamSourceSlotPrefix = "fes" EventStreamSourceSlotPrefix = "fes"
EventStreamSourceAuthType = "DatabaseAuthenticationSecret" EventStreamSourceAuthType = "DatabaseAuthenticationSecret"
EventStreamFlowPgNakadiType = "PostgresWalToNakadiDataEvent" EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent"
EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent" EventStreamSinkNakadiType = "Nakadi"
EventStreamFlowDataTypeColumn = "data_type"
EventStreamFlowDataOpColumn = "data_op"
EventStreamFlowMetadataColumn = "metadata"
EventStreamFlowDataColumn = "data"
EventStreamFlowPayloadColumn = "payload"
EventStreamSinkNakadiType = "Nakadi"
) )