@@ -160,14 +160,18 @@ abstract class KafkaSourceTest extends StreamTest with SharedSQLContext with Kaf
   }
 
   object WithOffsetSync {
-    def apply(topic: String)(func: () => Unit): StreamAction = {
+    /**
+     * Run `func` to write some Kafka messages and wait until the latest offset of the given
+     * `TopicPartition` is not less than `expectedOffset`.
+     */
+    def apply(
+        topicPartition: TopicPartition,
+        expectedOffset: Long)(func: () => Unit): StreamAction = {
       Execute("Run Kafka Producer")(_ => {
         func()
         // This is a hack for the race condition that the committed message may be not visible to
         // consumer for a short time.
-        // Looks like after the following call returns, the consumer can always read the committed
-        // messages.
-        testUtils.getLatestOffsets(Set(topic))
+        testUtils.waitUntilOffsetAppears(topicPartition, expectedOffset)
       })
     }
   }
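The helper now delegates its waiting to `testUtils.waitUntilOffsetAppears`, whose definition is not part of the hunks shown here. As a rough illustration of the polling such a helper has to do, here is a minimal Scala sketch assuming ScalaTest's `eventually` and a `getLatestOffsets`-style lookup like the one on the removed line above; the object name, the injected `latestOffsets` function, and the 60-second timeout are placeholders, not code from this PR.

```scala
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._

object OffsetWaitSketch {
  // Poll until the partition's latest offset is at least `expectedOffset`, or fail after
  // the timeout. `latestOffsets` stands in for the existing getLatestOffsets(Set(topic)).
  def waitUntilOffsetAppears(
      topicPartition: TopicPartition,
      expectedOffset: Long,
      latestOffsets: () => Map[TopicPartition, Long]): Unit = {
    eventually(timeout(60.seconds)) {
      val current = latestOffsets().get(topicPartition)
      assert(current.exists(_ >= expectedOffset),
        s"$topicPartition is at $current, expected at least offset $expectedOffset")
    }
  }
}
```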
@@ -652,13 +656,14 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
       }
     }
 
+    val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
     testUtils.withTranscationalProducer { producer =>
       testStream(mapped)(
         StartStream(ProcessingTime(100), clock),
         waitUntilBatchProcessed,
         CheckAnswer(),
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
           // Send 5 messages. They should be visible only after being committed.
           producer.beginTransaction()
           (0 to 4).foreach { i =>
@@ -669,7 +674,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         waitUntilBatchProcessed,
         // Should not see any uncommitted messages
         CheckNewAnswer(),
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
           producer.commitTransaction()
         },
         AdvanceManualClock(100),
@@ -678,7 +683,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
           // Send 5 messages and abort the transaction. They should not be read.
           producer.beginTransaction()
           (6 to 10).foreach { i =>
@@ -692,7 +697,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(), // offset: 9*, 10*, 11*
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
           // Send 5 messages again. The consumer should skip the above aborted messages and read
           // them.
           producer.beginTransaction()
@@ -707,7 +712,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(15, 16), // offset: 15, 16, 17*
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
           producer.beginTransaction()
           producer.send(new ProducerRecord[String, String](topic, "18")).get()
           producer.commitTransaction()
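The `expectedOffset` arguments follow from how the single partition fills up: every data record occupies one offset, and each `commitTransaction()` or `abortTransaction()` appends a transaction marker that occupies one more (the entries the test comments flag with `*`). A short worked check of the first few values under that reading:

```scala
// Offset bookkeeping for partition 0, assuming one control record per commit/abort
// (consistent with the "*" entries in the test comments above).
val afterFirstSends  = 5                        // records "0".."4" at offsets 0-4, log end 5
val afterFirstCommit = afterFirstSends + 1      // commit marker at offset 5, log end 6
val afterAbortedSend = afterFirstCommit + 5 + 1 // records at 6-10 plus abort marker at 11
val afterSecondBatch = afterAbortedSend + 5 + 1 // records at 12-16 plus commit marker at 17
assert(Seq(afterFirstSends, afterFirstCommit, afterAbortedSend, afterSecondBatch) ==
  Seq(5, 6, 12, 18)) // the expectedOffset values passed to WithOffsetSync above
```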
@@ -774,13 +779,14 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
       }
     }
 
+    val topicPartition = new TopicPartition(topic, 0)
     // The message values are the same as their offsets to make the test easy to follow
     testUtils.withTranscationalProducer { producer =>
       testStream(mapped)(
         StartStream(ProcessingTime(100), clock),
         waitUntilBatchProcessed,
         CheckNewAnswer(),
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
           // Send 5 messages. They should be visible only after being committed.
           producer.beginTransaction()
           (0 to 4).foreach { i =>
@@ -790,13 +796,13 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(0, 1, 2), // offset 0, 1, 2
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
           producer.commitTransaction()
         },
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
           // Send 5 messages and abort the transaction. They should not be read.
           producer.beginTransaction()
           (6 to 10).foreach { i =>
@@ -810,7 +816,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(9, 10), // offset: 9, 10, 11*
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
           // Send 5 messages again. The consumer should skip the above aborted messages and read
           // them.
           producer.beginTransaction()
@@ -825,7 +831,7 @@ abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
         AdvanceManualClock(100),
         waitUntilBatchProcessed,
         CheckNewAnswer(15, 16), // offset: 15, 16, 17*
-        WithOffsetSync(topic) { () =>
+        WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
           producer.beginTransaction()
           producer.send(new ProducerRecord[String, String](topic, "18")).get()
           producer.commitTransaction()
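The second group of call sites mirrors the first, but its assertions also surface data from open and aborted transactions (`CheckNewAnswer(0, 1, 2)` before the commit, `CheckNewAnswer(9, 10)` from the aborted batch), which suggests it is the read_uncommitted counterpart of the read_committed test above. The isolation level itself is configured outside these hunks; the sketch below shows how a Spark Kafka source is typically pointed at one level or the other, with the function name, servers, and topic being illustrative assumptions rather than lines from this PR.

```scala
import org.apache.spark.sql.{DataFrame, SparkSession}

// Sketch: "kafka."-prefixed options are forwarded to the underlying Kafka consumer, so the
// isolation level is set via "kafka.isolation.level".
def transactionalStream(
    spark: SparkSession,
    bootstrapServers: String,
    topic: String,
    isolationLevel: String): DataFrame = { // "read_committed" or "read_uncommitted"
  spark.readStream
    .format("kafka")
    .option("kafka.bootstrap.servers", bootstrapServers)
    .option("kafka.isolation.level", isolationLevel)
    .option("subscribe", topic)
    .load()
    .selectExpr("CAST(value AS STRING)")
}
```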