remove streamType field

parent a17d63088b
commit 223ffa75ef
@@ -477,6 +477,9 @@ spec:
                 nullable: true
                 items:
                   type: object
+                  required:
+                  - database
+                  - tables
                   properties:
                     batchSize:
                       type: integer
@@ -490,11 +493,6 @@ spec:
                       type: object
                       additionalProperties:
                         type: string
-                    streamType:
-                      type: string
-                      enum:
-                      - "nakadi"
-                      - "wal"
               teamId:
                 type: string
               tls:
@@ -522,27 +522,24 @@ under the `streams` top-level key will be used by the operator to create a
 CRD for Zalando's internal CDC operator named like the Postgres cluster.
 Each stream object can have the following properties:
 
-* **streamType**
-  Defines the stream flow. Choose `nakadi` when you want to specify certain
-  nakadi event types or `wal` if changes should be mapped to a generic
-  event type. Default is `wal`.
-
 * **database**
   Name of the database from where events will be published via Postgres'
   logical decoding feature. The operator will take care of updating the
   database configuration (setting `wal_level: logical`, creating logical
   replication slots, using output plugin `wal2json` and creating a dedicated
-  replication user).
+  replication user). Required.
 
 * **tables**
   Defines a map of table names and event types. The CDC operator is following
   the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/)
   meaning changes are only consumed from an extra table that already has the
   structure of the event in the target sink. The operator will assume that this
-  outbox table is called like `<table>_<event_type>_outbox`.
+  outbox table is called like `<table>_<event_type>_outbox`. Required.
 
 * **filter**
   Streamed events can be filtered by a jsonpath expression for each table.
+  Optional.
 
 * **batchSize**
-  Defines the size of batches in which events are consumed.
+  Defines the size of batches in which events are consumed. Optional.
+  Defaults to 1.
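The property list above fully determines how a stream entry is checked. As a minimal illustration (not the operator's actual code; `validateStream` and the local `Stream` copy are hypothetical), enforcing the documented rules in Go could look like this:

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative copy of a stream entry as documented above.
type Stream struct {
	Database  string
	Tables    map[string]string
	Filter    map[string]string
	BatchSize uint32
}

// validateStream enforces the documented rules: database and tables are
// required, filter is optional, and batchSize defaults to 1 when unset.
func validateStream(s *Stream) error {
	if s.Database == "" {
		return errors.New("streams: database is required")
	}
	if len(s.Tables) == 0 {
		return errors.New("streams: tables is required")
	}
	if s.BatchSize == 0 {
		s.BatchSize = 1 // documented default
	}
	return nil
}

func main() {
	s := Stream{Database: "foo", Tables: map[string]string{"data.ta": "event_type_a"}}
	fmt.Println(validateStream(&s), s.BatchSize) // <nil> 1
}
```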
@@ -198,15 +198,11 @@ spec:
 
 # Enables change data capture streams for defined database tables
 # streams:
-# - streamType: nakadi
-#   batchSize: 100
-#   database: foo
+# - database: foo
 #   tables:
-#     ta: event_type_a
-#     tb: event_type_b
-# - streamType: wal
-#   batchSize: 100
-#   database: foo
-#   tables:
-#     public.tx: event_type_a
-#     public.ty: event_type_b
+#     data.ta: event_type_a
+#     data.tb: event_type_b
+#   # Optional. Filter ignores events before a certain txnId and lsn. Can be used to skip bad events
+#   filter:
+#     data.ta: "[?(@.source.txId > 500 && @.source.lsn > 123456)]"
+#   batchSize: 1000
@@ -473,6 +473,9 @@ spec:
                 nullable: true
                 items:
                   type: object
+                  required:
+                  - database
+                  - tables
                   properties:
                     batchSize:
                       type: integer
@@ -486,11 +489,6 @@ spec:
                       type: object
                       additionalProperties:
                         type: string
-                    streamType:
-                      type: string
-                      enum:
-                      - "nakadi"
-                      - "wal"
               teamId:
                 type: string
               tls:
@@ -664,7 +664,8 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 						Type: "array",
 						Items: &apiextv1.JSONSchemaPropsOrArray{
 							Schema: &apiextv1.JSONSchemaProps{
-								Type: "object",
+								Type:     "object",
+								Required: []string{"database", "tables"},
 								Properties: map[string]apiextv1.JSONSchemaProps{
 									"batchSize": {
 										Type: "integer",
@@ -688,17 +689,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 									},
 								},
 							},
-							"streamType": {
-								Type: "string",
-								Enum: []apiextv1.JSON{
-									{
-										Raw: []byte(`"nakadi"`),
-									},
-									{
-										Raw: []byte(`"wal"`),
-									},
-								},
-							},
 						},
 					},
 				},
@@ -229,9 +229,8 @@ type ConnectionPooler struct {
 }
 
 type Stream struct {
-	StreamType string            `json:"streamType,omitempty"`
-	Database   string            `json:"database,omitempty"`
-	Tables     map[string]string `json:"tables,omitempty"`
-	Filter     map[string]string `json:"filter,omitempty"`
-	BatchSize  uint32            `json:"batchSize,omitempty"`
+	Database  string            `json:"database"`
+	Tables    map[string]string `json:"tables"`
+	Filter    map[string]string `json:"filter,omitempty"`
+	BatchSize uint32            `json:"batchSize,omitempty"`
 }
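To see the effect of the tightened JSON tags, here is a self-contained sketch that unmarshals a manifest entry into a local copy of the trimmed `Stream` type (copied from the hunk above so it runs without the operator's packages):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the Stream type after this commit.
type Stream struct {
	Database  string            `json:"database"`
	Tables    map[string]string `json:"tables"`
	Filter    map[string]string `json:"filter,omitempty"`
	BatchSize uint32            `json:"batchSize,omitempty"`
}

func main() {
	// A streams entry in the shape of the manifest example above,
	// given in JSON form; note there is no streamType key anymore.
	raw := []byte(`{
		"database": "foo",
		"tables": {"data.ta": "event_type_a", "data.tb": "event_type_b"},
		"filter": {"data.ta": "[?(@.source.txId > 500 && @.source.lsn > 123456)]"},
		"batchSize": 1000
	}`)

	var s Stream
	if err := json.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s)
}
```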
@@ -40,12 +40,8 @@ type EventStream struct {
 
 // EventStreamFlow defines the flow characteristics of the event stream
 type EventStreamFlow struct {
-	Type           string `json:"type"`
-	DataTypeColumn string `json:"dataTypeColumn,omitempty"`
-	DataOpColumn   string `json:"dataOpColumn,omitempty"`
-	MetadataColumn string `json:"metadataColumn,omitempty"`
-	DataColumn     string `json:"dataColumn,omitempty"`
-	PayloadColumn  string `json:"payloadColumn,omitempty"`
+	Type          string `json:"type"`
+	PayloadColumn string `json:"payloadColumn,omitempty" defaults:"payload"`
 }
 
 // EventStreamSink defines the target of the event stream
@@ -67,7 +63,7 @@ type EventStreamSource struct {
 // EventStreamTable defines the name and ID column to be used for streaming
 type EventStreamTable struct {
 	Name     string `json:"name"`
-	IDColumn string `json:"idColumn,omitempty"`
+	IDColumn string `json:"idColumn,omitempty" defaults:"id"`
 }
 
 // Connection to be used for allowing the FES operator to connect to a database
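Note that `defaults:"id"` and `defaults:"payload"` are custom struct tags: `encoding/json` ignores them, so they only take effect if a consumer (presumably the FES operator) reads them via reflection. A hedged sketch of such a filler follows; `applyDefaults` is hypothetical, not a known API:

```go
package main

import (
	"fmt"
	"reflect"
)

type EventStreamTable struct {
	Name     string `json:"name"`
	IDColumn string `json:"idColumn,omitempty" defaults:"id"`
}

// applyDefaults sets every empty string field of a struct to the value
// carried in its `defaults` tag, if any.
func applyDefaults(v interface{}) {
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		def := rt.Field(i).Tag.Get("defaults")
		f := rv.Field(i)
		if def != "" && f.Kind() == reflect.String && f.String() == "" {
			f.SetString(def)
		}
	}
}

func main() {
	t := EventStreamTable{Name: "foo_event_type_a_outbox"}
	applyDefaults(&t)
	fmt.Printf("%+v\n", t) // IDColumn is filled with "id"
}
```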
@@ -150,23 +150,9 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, table, eventType st
 }
 
 func getEventStreamFlow(stream acidv1.Stream) zalandov1alpha1.EventStreamFlow {
-	switch stream.StreamType {
-	case "nakadi":
-		return zalandov1alpha1.EventStreamFlow{
-			Type:           constants.EventStreamFlowPgNakadiType,
-			DataTypeColumn: constants.EventStreamFlowDataTypeColumn,
-			DataOpColumn:   constants.EventStreamFlowDataOpColumn,
-			MetadataColumn: constants.EventStreamFlowMetadataColumn,
-			DataColumn:     constants.EventStreamFlowDataColumn,
-		}
-	case "wal":
-		return zalandov1alpha1.EventStreamFlow{
-			Type:          constants.EventStreamFlowPgGenericType,
-			PayloadColumn: constants.EventStreamFlowPayloadColumn,
-		}
-	}
-
-	return zalandov1alpha1.EventStreamFlow{}
+	return zalandov1alpha1.EventStreamFlow{
+		Type: constants.EventStreamFlowPgGenericType,
+	}
 }
 
 func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1alpha1.EventStreamSink {
@@ -190,8 +176,7 @@ func getTableSchema(fullTableName string) (tableName, schemaName string) {
 
 func getOutboxTable(tableName, eventType string) zalandov1alpha1.EventStreamTable {
 	return zalandov1alpha1.EventStreamTable{
-		Name:     outboxTableNameTemplate.Format("table", tableName, "eventtype", eventType),
-		IDColumn: "id",
+		Name: outboxTableNameTemplate.Format("table", tableName, "eventtype", eventType),
 	}
 }
 
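With `IDColumn` now left to the `defaults:"id"` tag, `getOutboxTable` only fills the name. Based on the documented pattern `<table>_<event_type>_outbox`, the template call presumably expands like the illustrative one-liner below (`outboxTableName` is a stand-in, not the operator's `outboxTableNameTemplate`):

```go
package main

import "fmt"

// Stand-in for outboxTableNameTemplate.Format, assuming it expands the
// documented pattern `<table>_<event_type>_outbox`.
func outboxTableName(table, eventType string) string {
	return fmt.Sprintf("%s_%s_outbox", table, eventType)
}

func main() {
	fmt.Println(outboxTableName("ta", "event_type_a")) // ta_event_type_a_outbox
}
```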
@@ -236,15 +221,16 @@ func (c *Cluster) syncStreams() error {
 		c.logger.Infof("event streams do not exist, create it")
 		err := c.createStreams()
 		if err != nil {
-			return fmt.Errorf("event stream creation failed: %v", err)
+			return fmt.Errorf("event streams creation failed: %v", err)
 		}
 	} else {
 		desiredStreams := c.generateFabricEventStream()
 		if !reflect.DeepEqual(effectiveStreams.Spec, desiredStreams.Spec) {
 			c.logger.Debug("updating event streams")
+			desiredStreams.ObjectMeta.ResourceVersion = effectiveStreams.ObjectMeta.ResourceVersion
 			err = c.updateStreams(desiredStreams)
 			if err != nil {
-				return fmt.Errorf("event stream update failed: %v", err)
+				return fmt.Errorf("event streams update failed: %v", err)
 			}
 		}
 	}
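The added `ResourceVersion` line matters because Kubernetes rejects updates whose resource version does not match the stored object. A minimal sketch of the compare-then-update pattern with stand-in types (`FakeStream`, `sync`, and the update callback are illustrative, not the operator's API):

```go
package main

import (
	"fmt"
	"reflect"
)

type FakeSpec struct{ BatchSize uint32 }

type FakeStream struct {
	ResourceVersion string
	Spec            FakeSpec
}

// sync updates the stored stream only when the desired spec differs,
// carrying over the server-side ResourceVersion as in the hunk above.
func sync(effective, desired *FakeStream, update func(*FakeStream) error) error {
	if !reflect.DeepEqual(effective.Spec, desired.Spec) {
		desired.ResourceVersion = effective.ResourceVersion
		if err := update(desired); err != nil {
			return fmt.Errorf("event streams update failed: %v", err)
		}
	}
	return nil
}

func main() {
	eff := &FakeStream{ResourceVersion: "42", Spec: FakeSpec{BatchSize: 100}}
	des := &FakeStream{Spec: FakeSpec{BatchSize: 1000}}
	fmt.Println(sync(eff, des, func(*FakeStream) error { return nil })) // <nil>
}
```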
@@ -47,16 +47,7 @@ var (
 			},
 			Streams: []acidv1.Stream{
 				{
-					StreamType: "nakadi",
-					Database:   "foo",
-					Tables: map[string]string{
-						"bar": "stream_type_a",
-					},
-					BatchSize: uint32(100),
-				},
-				{
-					StreamType: "wal",
-					Database:   "foo",
+					Database: "foo",
 					Tables: map[string]string{
 						"bar": "stream_type_a",
 					},
@@ -132,8 +123,7 @@ func TestUpdateFabricEventStream(t *testing.T) {
 	var pgSpec acidv1.PostgresSpec
 	pgSpec.Streams = []acidv1.Stream{
 		{
-			StreamType: "nakadi",
-			Database:   "foo",
+			Database: "foo",
 			Tables: map[string]string{
 				"bar": "stream_type_b",
 			},
@@ -2,15 +2,9 @@ package constants
 
 // PostgreSQL specific constants
 const (
-	EventStreamSourcePGType       = "PostgresLogicalReplication"
-	EventStreamSourceSlotPrefix   = "fes"
-	EventStreamSourceAuthType     = "DatabaseAuthenticationSecret"
-	EventStreamFlowPgNakadiType   = "PostgresWalToNakadiDataEvent"
-	EventStreamFlowPgGenericType  = "PostgresWalToGenericNakadiEvent"
-	EventStreamFlowDataTypeColumn = "data_type"
-	EventStreamFlowDataOpColumn   = "data_op"
-	EventStreamFlowMetadataColumn = "metadata"
-	EventStreamFlowDataColumn     = "data"
-	EventStreamFlowPayloadColumn  = "payload"
-	EventStreamSinkNakadiType     = "Nakadi"
+	EventStreamSourcePGType      = "PostgresLogicalReplication"
+	EventStreamSourceSlotPrefix  = "fes"
+	EventStreamSourceAuthType    = "DatabaseAuthenticationSecret"
+	EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent"
+	EventStreamSinkNakadiType    = "Nakadi"
 )