2020import org .apache .flink .api .common .serialization .SerializationSchema ;
2121import org .apache .flink .streaming .api .datastream .DataStream ;
2222import org .apache .flink .streaming .api .functions .sink .SinkFunction ;
23- import org .apache .flink .streaming .connectors .kafka .* ;
24- import org .apache .flink .streaming .connectors .kafka .partitioner . FlinkFixedPartitioner ;
23+ import org .apache .flink .streaming .connectors .kafka .FlinkKafkaProducerBase ;
24+ import org .apache .flink .streaming .connectors .kafka .Kafka09TableSink ;
2525import org .apache .flink .streaming .connectors .kafka .partitioner .FlinkKafkaDelegatePartitioner ;
2626import org .apache .flink .streaming .connectors .kafka .partitioner .FlinkKafkaPartitioner ;
27- import org .apache .flink .streaming . connectors . kafka . partitioner . KafkaPartitioner ;
28- import org .apache .flink .table .util . TableConnectorUtil ;
27+ import org .apache .flink .table . api . TableSchema ;
28+ import org .apache .flink .table .utils . TableConnectorUtils ;
2929import org .apache .flink .types .Row ;
3030
3131import java .util .Optional ;
 *
 * @author maqi
 */
41- public class CustomerKafka09JsonTableSink extends KafkaJsonTableSink {
41+ public class CustomerKafka09JsonTableSink extends Kafka09TableSink {
4242
4343
4444 protected SerializationSchema schema ;
4545
46- public CustomerKafka09JsonTableSink (String topic , Properties properties , SerializationSchema schema ) {
47- super (topic , properties , new FlinkFixedPartitioner <>());
48- this .schema = schema ;
49- }
5046
51- public CustomerKafka09JsonTableSink (String topic , Properties properties , FlinkKafkaPartitioner <Row > partitioner , SerializationSchema schema ) {
52- super (topic , properties , partitioner );
53- this .schema = schema ;
54- }
5547
5648
5749 @ Deprecated
58- public CustomerKafka09JsonTableSink (String topic , Properties properties , KafkaPartitioner <Row > partitioner , SerializationSchema schema ) {
59- super (topic , properties , new FlinkKafkaDelegatePartitioner <>(partitioner ));
60- this .schema = schema ;
50+ public CustomerKafka09JsonTableSink (TableSchema schema ,
51+ String topic ,
52+ Properties properties ,
53+ Optional <FlinkKafkaPartitioner <Row >> partitioner ,
54+ SerializationSchema <Row > serializationSchema ) {
55+ super (schema , topic , properties , partitioner , serializationSchema );
56+ this .schema = serializationSchema ;
6157 }
6258
6359 @ Override
64- protected SinkFunction <Row > createKafkaProducer (String s , Properties properties , SerializationSchema <Row > serializationSchema , Optional <FlinkKafkaPartitioner <Row >> optional ) {
60+ protected FlinkKafkaProducerBase <Row > createKafkaProducer (String s , Properties properties , SerializationSchema <Row > serializationSchema , Optional <FlinkKafkaPartitioner <Row >> optional ) {
6561 return new CustomerFlinkKafkaProducer09 <>(topic , serializationSchema , properties );
6662 }
6763
@@ -70,6 +66,6 @@ public void emitDataStream(DataStream<Row> dataStream) {
7066 SinkFunction <Row > kafkaProducer = createKafkaProducer (topic , properties , schema , partitioner );
7167 // always enable flush on checkpoint to achieve at-least-once if query runs with checkpointing enabled.
7268 //kafkaProducer.setFlushOnCheckpoint(true);
73- dataStream .addSink (kafkaProducer ).name (TableConnectorUtil .generateRuntimeName (this .getClass (), fieldNames ));
69+ dataStream .addSink (kafkaProducer ).name (TableConnectorUtils .generateRuntimeName (this .getClass (), getFieldNames () ));
7470 }
7571}
0 commit comments