34 | 34 | import reactor.core.publisher.Mono; |
35 | 35 |
36 | 36 | import java.time.Duration; |
| 37 | +import java.time.Instant; |
37 | 38 | import java.util.ArrayList; |
38 | 39 | import java.util.Arrays; |
39 | 40 | import java.util.HashMap; |
@@ -201,20 +202,16 @@ public void readFromSingleContainer(boolean useMasterKey, CosmosMetadataStorageT |
201 | 202 | List<ConsumerRecord<String, JsonNode>> metadataRecords = new ArrayList<>(); |
202 | 203 | List<ConsumerRecord<String, JsonNode>> itemRecords = new ArrayList<>(); |
203 | 204 | int expectedMetadataRecordsCount = metadataStorageType == CosmosMetadataStorageType.COSMOS ? 0 : 2; |
204 | | - int expectedItemRecords = createdItems.size(); |
| 205 | + int expectedItemRecordsCount = createdItems.size(); |
205 | 206 |
206 | | - Unreliables.retryUntilTrue(30, TimeUnit.SECONDS, () -> { |
207 | | - kafkaConsumer.poll(Duration.ofMillis(1000)) |
208 | | - .iterator() |
209 | | - .forEachRemaining(consumerRecord -> { |
210 | | - if (consumerRecord.topic().equals(topicName)) { |
211 | | - itemRecords.add(consumerRecord); |
212 | | - } else if (consumerRecord.topic().equals(sourceConfig.getMetadataConfig().getStorageName())) { |
213 | | - metadataRecords.add(consumerRecord); |
214 | | - } |
215 | | - }); |
216 | | - return metadataRecords.size() >= expectedMetadataRecordsCount && itemRecords.size() >= expectedItemRecords; |
217 | | - }); |
| 207 | + pollChangesForSingleTopic( |
| 208 | + kafkaConsumer, |
| 209 | + topicName, |
| 210 | + sourceConfig.getMetadataConfig().getStorageName(), |
| 211 | + itemRecords, |
| 212 | + metadataRecords, |
| 213 | + expectedItemRecordsCount, |
| 214 | + expectedMetadataRecordsCount); |
218 | 215 |
219 | 216 | assertThat(metadataRecords.size()).isEqualTo(expectedMetadataRecordsCount); |
220 | 217 | if (metadataStorageType == CosmosMetadataStorageType.KAFKA) { |
@@ -270,15 +267,7 @@ public void readFromSingleContainer(boolean useMasterKey, CosmosMetadataStorageT |
270 | 267 | assertThat(feedRangesMetadataTopicOffsetOffset.getFeedRanges().size()).isEqualTo(1); |
271 | 268 | } |
272 | 269 |
273 | | - // validate the item records |
274 | | - assertThat(itemRecords.size()).isEqualTo(createdItems.size()); |
275 | | - List<String> receivedItems = |
276 | | - itemRecords.stream().map(consumerRecord -> { |
277 | | - JsonNode jsonNode = consumerRecord.value(); |
278 | | - return jsonNode.get("payload").get("id").asText(); |
279 | | - }).collect(Collectors.toList()); |
280 | | - |
281 | | - assertThat(receivedItems.containsAll(createdItems)).isTrue(); |
| 270 | + validateFeedRangeItemRecords(itemRecords, createdItems); |
282 | 271 |
283 | 272 | } finally { |
284 | 273 | if (client != null) { |
@@ -680,4 +669,150 @@ public void readFromAllContainer(boolean useMasterKey, CosmosMetadataStorageType |
680 | 669 | } |
681 | 670 | } |
682 | 671 | } |
| 672 | + |
| 673 | + @Test(groups = { "kafka-integration" }, timeOut = 2 * TIMEOUT) |
| 674 | + public void readFromSingleContainer_pause_and_resume() { |
| 675 | + logger.info("Pause and resume connector for single container "); |
| 676 | + String topicName = singlePartitionContainerName + "-" + UUID.randomUUID(); |
| 677 | + |
| 678 | + Map<String, String> sourceConnectorConfig = new HashMap<>(); |
| 679 | + sourceConnectorConfig.put("connector.class", "com.azure.cosmos.kafka.connect.CosmosSourceConnector"); |
| 680 | + sourceConnectorConfig.put("azure.cosmos.account.endpoint", KafkaCosmosTestConfigurations.HOST); |
| 681 | + sourceConnectorConfig.put("azure.cosmos.application.name", "Test"); |
| 682 | + sourceConnectorConfig.put("azure.cosmos.source.database.name", databaseName); |
| 683 | + sourceConnectorConfig.put("azure.cosmos.source.containers.includeAll", "false"); |
| 684 | + sourceConnectorConfig.put("azure.cosmos.source.containers.includedList", singlePartitionContainerName); |
| 685 | + sourceConnectorConfig.put("azure.cosmos.source.containers.topicMap", topicName + "#" + singlePartitionContainerName); |
| 686 | + sourceConnectorConfig.put("azure.cosmos.account.key", KafkaCosmosTestConfigurations.MASTER_KEY); |
| 687 | + |
| 688 | + // Create topic ahead of time |
| 689 | + kafkaCosmosConnectContainer.createTopic(topicName, 1); |
| 690 | + |
| 691 | + CosmosSourceConfig sourceConfig = new CosmosSourceConfig(sourceConnectorConfig); |
| 692 | + CosmosAsyncContainer container = client.getDatabase(databaseName).getContainer(singlePartitionContainerName); |
| 693 | + |
| 694 | + String connectorName = "simpleTest-" + UUID.randomUUID(); |
| 695 | + |
| 696 | + try { |
| 697 | + // create a few items in the container |
| 698 | + logger.info("creating items in container {}", singlePartitionContainerName); |
| 699 | + List<String> createdItems = new ArrayList<>(); |
| 700 | + for (int i = 0; i < 10; i++) { |
| 701 | + TestItem testItem = TestItem.createNewItem(); |
| 702 | + container.createItem(testItem).block(); |
| 703 | + createdItems.add(testItem.getId()); |
| 704 | + } |
| 705 | + |
| 706 | + kafkaCosmosConnectContainer.registerConnector(connectorName, sourceConnectorConfig); |
| 707 | + |
| 708 | + logger.info("Getting consumer and subscribe to topic {}", singlePartitionContainerName); |
| 709 | + |
| 710 | + Properties consumerProperties = kafkaCosmosConnectContainer.getConsumerProperties(); |
| 711 | + consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); |
| 712 | + consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class.getName()); |
| 713 | + KafkaConsumer<String, JsonNode> kafkaConsumer = new KafkaConsumer<>(consumerProperties); |
| 714 | + |
| 715 | + kafkaConsumer.subscribe( |
| 716 | + Arrays.asList( |
| 717 | + topicName, |
| 718 | + sourceConfig.getMetadataConfig().getStorageName())); |
| 719 | + |
| 720 | + List<ConsumerRecord<String, JsonNode>> metadataRecords = new ArrayList<>(); |
| 721 | + List<ConsumerRecord<String, JsonNode>> itemRecords = new ArrayList<>(); |
| 722 | + int expectedMetadataRecordsCount = 2; |
| 723 | + int expectedItemRecordsCount = createdItems.size(); |
| 724 | + |
| 725 | + pollChangesForSingleTopic( |
| 726 | + kafkaConsumer, |
| 727 | + topicName, |
| 728 | + sourceConfig.getMetadataConfig().getStorageName(), |
| 729 | + itemRecords, |
| 730 | + metadataRecords, |
| 731 | + expectedItemRecordsCount, |
| 732 | + expectedMetadataRecordsCount); |
| 733 | + |
| 734 | + assertThat(metadataRecords.size()).isEqualTo(expectedMetadataRecordsCount); |
| 735 | + validateFeedRangeItemRecords(itemRecords, createdItems); |
| 736 | + |
| 737 | + // now pause the connector |
| 738 | + kafkaCosmosConnectContainer.pauseConnector(connectorName); |
| 739 | + |
| 740 | + // create a few more items while the connector is paused |
| 741 | + createdItems.clear(); |
| 742 | + metadataRecords.clear(); |
| 743 | + itemRecords.clear(); |
| 744 | + |
| 745 | + for (int i = 0; i < 5; i++) { |
| 746 | + TestItem testItem = TestItem.createNewItem(); |
| 747 | + container.createItem(testItem).block(); |
| 748 | + createdItems.add(testItem.getId()); |
| 749 | + } |
| 750 | + |
| 751 | + // resume the connector |
| 752 | + kafkaCosmosConnectContainer.resumeConnector(connectorName); |
| 753 | + // poll again, this time for a fixed longer window, to make sure no duplicate records are returned |
| 754 | + Instant startPollTime = Instant.now(); |
| 755 | + while (Duration.between(startPollTime, Instant.now()).toMillis() < 60 * 1000) { |
| 756 | + kafkaConsumer.poll(Duration.ofMillis(1000)) |
| 757 | + .iterator() |
| 758 | + .forEachRemaining(consumerRecord -> { |
| 759 | + if (consumerRecord.topic().equals(topicName)) { |
| 760 | + itemRecords.add(consumerRecord); |
| 761 | + } else if (consumerRecord.topic().equals(sourceConfig.getMetadataConfig().getStorageName())) { |
| 762 | + metadataRecords.add(consumerRecord); |
| 763 | + } |
| 764 | + }); |
| 765 | + } |
| 766 | + |
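| | + // exact-count assertions confirm that no duplicate records were produced across the pause/resume cycle |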
| 767 | + assertThat(metadataRecords.size()).isEqualTo(expectedMetadataRecordsCount); |
| 768 | + validateFeedRangeItemRecords(itemRecords, createdItems); |
| 769 | + } finally { |
| 770 | + if (client != null) { |
| 771 | + logger.info("cleaning container {}", singlePartitionContainerName); |
| 772 | + cleanUpContainer(client, databaseName, singlePartitionContainerName); |
| 773 | + } |
| 774 | + |
| 775 | + // IMPORTANT: remove the connector after use |
| 776 | + if (kafkaCosmosConnectContainer != null) { |
| 777 | + kafkaCosmosConnectContainer.deleteConnector(connectorName); |
| 778 | + } |
| 779 | + } |
| 780 | + } |
| 781 | + |
| 782 | + private void pollChangesForSingleTopic( |
| 783 | + KafkaConsumer<String, JsonNode> kafkaConsumer, |
| 784 | + String topicName, |
| 785 | + String storageName, |
| 786 | + List<ConsumerRecord<String, JsonNode>> itemRecords, |
| 787 | + List<ConsumerRecord<String, JsonNode>> metadataRecords, |
| 788 | + int expectedItemRecords, |
| 789 | + int expectedMetadataRecordsCount) { |
| 790 | + |
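| | + // poll in 1-second batches for up to 30 seconds, routing each record to the item or metadata list by topic, until both expected counts are reached |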
| 791 | + Unreliables.retryUntilTrue(30, TimeUnit.SECONDS, () -> { |
| 792 | + kafkaConsumer.poll(Duration.ofMillis(1000)) |
| 793 | + .iterator() |
| 794 | + .forEachRemaining(consumerRecord -> { |
| 795 | + if (consumerRecord.topic().equals(topicName)) { |
| 796 | + itemRecords.add(consumerRecord); |
| 797 | + } else if (consumerRecord.topic().equals(storageName)) { |
| 798 | + metadataRecords.add(consumerRecord); |
| 799 | + } |
| 800 | + }); |
| 801 | + return metadataRecords.size() >= expectedMetadataRecordsCount && itemRecords.size() >= expectedItemRecords; |
| 802 | + }); |
| 803 | + } |
| 804 | + |
| 805 | + private void validateFeedRangeItemRecords( |
| 806 | + List<ConsumerRecord<String, JsonNode>> itemRecords, |
| 807 | + List<String> expectedItems) { |
| 808 | + // validate that every created item was received on the item topic |
| 809 | + assertThat(itemRecords.size()).isEqualTo(expectedItems.size()); |
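| | + // each record value is a Kafka Connect JSON envelope, so the item id is read from the nested "payload" node |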
| 810 | + List<String> receivedItems = |
| 811 | + itemRecords.stream().map(consumerRecord -> { |
| 812 | + JsonNode jsonNode = consumerRecord.value(); |
| 813 | + return jsonNode.get("payload").get("id").asText(); |
| 814 | + }).collect(Collectors.toList()); |
| 815 | + |
| 816 | + assertThat(receivedItems.containsAll(expectedItems)).isTrue(); |
| 817 | + } |
683 | 818 | } |