package com.example.kafkacluster;

import com.google.common.collect.ImmutableMap;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.tuple;

class ApacheKafkaContainerClusterTest {

    @Test
    void testKafkaContainerCluster() throws Exception {
        // Three brokers, replication factor 2 for internal topics.
        try (ApacheKafkaContainerCluster cluster = new ApacheKafkaContainerCluster("3.8.0", 3, 2)) {
            cluster.start();
            String bootstrapServers = cluster.getBootstrapServers();

            assertThat(cluster.getBrokers()).hasSize(3);

            testKafkaFunctionality(bootstrapServers, 3, 2);
        }
    }

    protected void testKafkaFunctionality(String bootstrapServers, int partitions, int rf) throws Exception {
        try (
            AdminClient adminClient = AdminClient.create(
                ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
            );
            KafkaProducer<String, String> producer = new KafkaProducer<>(
                ImmutableMap.of(
                    ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                    bootstrapServers,
                    ProducerConfig.CLIENT_ID_CONFIG,
                    UUID.randomUUID().toString()
                ),
                new StringSerializer(),
                new StringSerializer()
            );
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(
                ImmutableMap.of(
                    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                    bootstrapServers,
                    ConsumerConfig.GROUP_ID_CONFIG,
                    "tc-" + UUID.randomUUID(),
                    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
                    "earliest"
                ),
                new StringDeserializer(),
                new StringDeserializer()
            );
        ) {
            String topicName = "messages";

            // Create the topic with the requested partition count and replication factor.
            Collection<NewTopic> topics = Collections.singletonList(new NewTopic(topicName, partitions, (short) rf));
            adminClient.createTopics(topics).all().get(30, TimeUnit.SECONDS);

            consumer.subscribe(Collections.singletonList(topicName));

            // Blocking get() ensures the record is acknowledged before polling starts.
            producer.send(new ProducerRecord<>(topicName, "testcontainers", "rulezzz")).get();

            // Poll until the record arrives; a single poll() may return nothing while
            // the consumer group is still rebalancing.
            Awaitility
                .await()
                .atMost(Duration.ofSeconds(10))
                .untilAsserted(() -> {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));

                    assertThat(records)
                        .hasSize(1)
                        .extracting(ConsumerRecord::topic, ConsumerRecord::key, ConsumerRecord::value)
                        .containsExactly(tuple(topicName, "testcontainers", "rulezzz"));
                });

            consumer.unsubscribe();
        }
    }
}
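The `ApacheKafkaContainerCluster` helper that the test drives is not defined in this change. As a rough guide to what the test assumes, here is a minimal sketch of such a helper, modeled on the multi-broker Kafka example in the Testcontainers documentation: it wires N `apache/kafka` containers into one KRaft quorum in which every node acts as both broker and controller. The `KAFKA_*` environment variables, the controller port (9094), and the shared cluster id are assumptions about the image's defaults, not the actual class under test.

package com.example.kafkacluster;

import org.testcontainers.containers.Network;
import org.testcontainers.kafka.KafkaContainer;
import org.testcontainers.lifecycle.Startable;
import org.testcontainers.lifecycle.Startables;
import org.testcontainers.utility.DockerImageName;

import java.util.Collection;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

// Hypothetical sketch, not the class under test: N KRaft nodes in combined
// broker+controller mode on a shared Docker network.
public class ApacheKafkaContainerCluster implements Startable {

    private final Collection<KafkaContainer> brokers;

    public ApacheKafkaContainerCluster(String version, int brokersNum, int internalTopicsRf) {
        Network network = Network.newNetwork();

        // Every node votes in the controller quorum, e.g. "0@broker-0:9094,1@broker-1:9094".
        // Port 9094 assumes the image's default controller listener.
        String controllerQuorumVoters = IntStream
            .range(0, brokersNum)
            .mapToObj(i -> String.format("%d@broker-%d:9094", i, i))
            .collect(Collectors.joining(","));

        this.brokers = IntStream
            .range(0, brokersNum)
            .mapToObj(brokerNum ->
                new KafkaContainer(DockerImageName.parse("apache/kafka").withTag(version))
                    .withNetwork(network)
                    .withNetworkAliases("broker-" + brokerNum)
                    .withEnv("KAFKA_NODE_ID", String.valueOf(brokerNum))
                    .withEnv("KAFKA_PROCESS_ROLES", "broker,controller")
                    .withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters)
                    // All nodes must share one cluster id; this is the example id from the Kafka docs.
                    .withEnv("CLUSTER_ID", "4L6g3nShT-eMCtK--X86sw")
                    .withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", String.valueOf(internalTopicsRf))
                    .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", String.valueOf(internalTopicsRf))
            )
            .collect(Collectors.toList());
    }

    public Collection<KafkaContainer> getBrokers() {
        return this.brokers;
    }

    // Comma-separated bootstrap servers across all brokers, as the test expects.
    public String getBootstrapServers() {
        return this.brokers.stream().map(KafkaContainer::getBootstrapServers).collect(Collectors.joining(","));
    }

    @Override
    public void start() {
        // Start all brokers in parallel and wait until each is ready.
        Startables.deepStart(this.brokers.stream()).join();
    }

    @Override
    public void stop() {
        this.brokers.forEach(KafkaContainer::stop);
    }
}

Starting the brokers with `Startables.deepStart(...)` rather than one sequential `start()` call per container lets the nodes come up in parallel, which matters here because a KRaft controller quorum cannot finish forming until a majority of the voters is reachable. `Startable` also extends `AutoCloseable`, which is what allows the test to manage the cluster with try-with-resources.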