@@ -21,7 +21,7 @@
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;
 import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducerBase;
-import org.apache.flink.streaming.connectors.kafka.Kafka09JsonTableSink;
+import org.apache.flink.streaming.connectors.kafka.Kafka010JsonTableSink;
 import org.apache.flink.streaming.connectors.kafka.KafkaJsonTableSink;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
 import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner;
@@ -33,6 +33,7 @@
 import java.util.Properties;
 
 /**
+ *
  * Reason: add schema info
  * Date: 2019/4/8
  * Company: www.dtstack.com
@@ -60,15 +61,15 @@ public CustomerKafka11JsonTableSink(String topic, Properties properties, KafkaPa
         super(topic, properties, new FlinkKafkaDelegatePartitioner<>(partitioner));
         this.schema = schema;
     }
-
+    //TODO use the 0.10 producer for now
     @Override
     protected FlinkKafkaProducerBase<Row> createKafkaProducer(String topic, Properties properties, SerializationSchema<Row> serializationSchema, FlinkKafkaPartitioner<Row> partitioner) {
         return new FlinkKafkaProducer010<Row>(topic, serializationSchema, properties, partitioner);
     }
 
     @Override
-    protected Kafka09JsonTableSink createCopy() {
-        return new Kafka09JsonTableSink(topic, properties, partitioner);
+    protected Kafka010JsonTableSink createCopy() {
+        return new Kafka010JsonTableSink(topic, properties, partitioner);
     }
 
     @Override
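
For context, here is a minimal usage sketch (not part of the patch) of the stock Kafka010JsonTableSink that createCopy() now returns. The Table API configures a KafkaTableSink via configure(...), which goes through createCopy(), before emitting rows, so the copy must target the same Kafka version as the producer. The broker address, topic name, and the registered table "source" below are hypothetical placeholders; the three-argument constructor mirrors the call in createCopy() above, and FlinkFixedPartitioner is the same partitioner the patch imports.

import java.util.Properties;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.Kafka010JsonTableSink;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;

public class Kafka010SinkSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker

        // Serializes each Row to JSON and writes it with FlinkKafkaProducer010,
        // the same producer the patched createKafkaProducer(...) builds.
        Kafka010JsonTableSink sink = new Kafka010JsonTableSink(
                "output-topic", props, new FlinkFixedPartitioner<>());

        // "source" is assumed to be registered with the table environment already.
        Table result = tableEnv.scan("source");

        // writeToSink(...) triggers configure(...), and thus createCopy(), internally.
        result.writeToSink(sink);

        env.execute("kafka-010-json-sink-sketch");
    }
}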