@@ -1,6 +1,5 @@
 import logging
 import uuid
-from time import sleep
 from confluent_kafka import Consumer, Producer, TopicPartition
 
 logger = logging.getLogger("saluki")
@@ -17,6 +16,7 @@ def play(
1716 """
1817 Replay data from src_topic to dest_topic between the offsets OR timestamps specified.
1918 This currently assumes contiguous data in a topic (ie. no log compaction) and uses partition 0.
19+ It also does not copy message timestamps.
2020
2121 :param src_broker: The source broker, including port.
2222 :param src_topic: The topic to replay data from.
@@ -62,27 +62,12 @@ def play(
 
     num_messages = stop_offset.offset - start_offset.offset + 1
 
-    def delivery_report(err, msg):
-        """ Called once for each message produced to indicate delivery result.
-        Triggered by poll() or flush()."""
-        if err is not None:
-            logger.error('Message delivery failed: {}'.format(err))
-        else:
-            logger.debug('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
-
     try:
         msgs = consumer.consume(num_messages)
         logger.debug(f"finished consuming {num_messages} messages")
         consumer.close()
-        # logger.debug(f"{msgs}")
-        for message in msgs:
-            producer.poll(0)
-            producer.produce(dest_topic, message.value(), message.key(), callback=delivery_report)
-        # producer.produce_batch(dest_topic, [{'key': message.key(), 'value': message.value()} for message in msgs])
-        # producer.poll()
+        producer.produce_batch(dest_topic, [{'key': message.key(), 'value': message.value()} for message in msgs])
         logger.debug(f"flushing producer. len(p): {len(producer)}")
-        # while len(producer): producer.flush()
-
         producer.flush(timeout=10)
 
         logger.debug(f"length after flushing: {len(producer)}")
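
The new docstring caveat notes that message timestamps are not copied. If preserving them ever matters, one possible approach (a sketch, not part of this change) is to fall back to per-message produce(), which accepts a timestamp argument (milliseconds since epoch) in confluent_kafka, paired with Message.timestamp() on each consumed record; msgs, dest_topic and producer are assumed to be the same objects used above.

from confluent_kafka import TIMESTAMP_NOT_AVAILABLE

# Hypothetical replay loop that forwards each consumed message's original timestamp.
for message in msgs:
    ts_type, ts = message.timestamp()  # (timestamp type, ms since epoch)
    producer.produce(
        dest_topic,
        message.value(),
        message.key(),
        # 0 lets the producer stamp the current time when the source
        # message has no recorded timestamp.
        timestamp=ts if ts_type != TIMESTAMP_NOT_AVAILABLE else 0,
    )
producer.flush(timeout=10)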