ApacheKafkaContainerCluster.java
package com.example.kafkacluster;

import org.apache.kafka.common.Uuid;
import org.awaitility.Awaitility;
import org.testcontainers.containers.Container;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.kafka.KafkaContainer;
import org.testcontainers.lifecycle.Startable;
import org.testcontainers.utility.DockerImageName;

import java.time.Duration;
import java.util.Collection;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.assertj.core.api.Assertions.assertThat;

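/**
 * A KRaft-mode Apache Kafka cluster in which every {@link KafkaContainer} node acts as both
 * broker and controller over a shared Docker network. Useful for tests that need real
 * replication across brokers rather than a single-node instance.
 */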
public class ApacheKafkaContainerCluster implements Startable {

private final int brokersNum;

private final Network network;

private final Collection<KafkaContainer> brokers;

public ApacheKafkaContainerCluster(String version, int brokersNum, int internalTopicsRf) {
if (brokersNum <= 0) {
throw new IllegalArgumentException("brokersNum '" + brokersNum + "' must be greater than 0");
}
if (internalTopicsRf <= 0 || internalTopicsRf > brokersNum) {
throw new IllegalArgumentException(
"internalTopicsRf '" + internalTopicsRf + "' must be greater than 0 and less than or equal to brokersNum"
);
}

this.brokersNum = brokersNum;
this.network = Network.newNetwork();

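// KRaft's controller.quorum.voters takes a comma-separated list of {nodeId}@{host}:{port};
// each node is addressed here by its "broker-<n>" network alias on the controller listener port 9094.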
String controllerQuorumVoters = IntStream
.range(0, brokersNum)
.mapToObj(brokerNum -> String.format("%d@broker-%d:9094", brokerNum, brokerNum))
.collect(Collectors.joining(","));

String clusterId = Uuid.randomUuid().toString();

this.brokers =
IntStream
.range(0, brokersNum)
.mapToObj(brokerNum -> {
return new KafkaContainer(DockerImageName.parse("apache/kafka").withTag(version))
.withNetwork(this.network)
.withNetworkAliases("broker-" + brokerNum)
.withEnv("CLUSTER_ID", clusterId)
.withEnv("KAFKA_BROKER_ID", brokerNum + "")
.withEnv("KAFKA_NODE_ID", brokerNum + "")
.withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters)
.withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", internalTopicsRf + "")
.withEnv("KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS", "0")
.withEnv("KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS", internalTopicsRf + "")
.withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", internalTopicsRf + "")
.withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", internalTopicsRf + "")
.withStartupTimeout(Duration.ofMinutes(1));
})
.collect(Collectors.toList());
}

public Collection<KafkaContainer> getBrokers() {
return this.brokers;
}

public String getBootstrapServers() {
return brokers.stream().map(KafkaContainer::getBootstrapServers).collect(Collectors.joining(","));
}

@Override
public void start() {
// Start all brokers in parallel: the KRaft controller quorum only forms once a majority of voters is running, so starting them one at a time would block
brokers.parallelStream().forEach(GenericContainer::start);

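// Wait until every broker has registered: kafka-log-dirs --describe prints one "broker" field
// per live node, so the grep count must reach brokersNum.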
Awaitility
.await()
.atMost(Duration.ofSeconds(30))
.untilAsserted(() -> {
Container.ExecResult result =
this.brokers.stream()
.findFirst()
.get()
.execInContainer(
"sh",
"-c",
"/opt/kafka/bin/kafka-log-dirs.sh --bootstrap-server localhost:9093 --describe | grep -o '\"broker\"' | wc -l"
);
String brokerCount = result.getStdout().replace("\n", "");

assertThat(brokerCount).asInt().isEqualTo(this.brokersNum);
});
}

@Override
public void stop() {
this.brokers.stream().parallel().forEach(GenericContainer::stop);
}
}
ApacheKafkaContainerClusterTest.java
package com.example.kafkacluster;

import com.google.common.collect.ImmutableMap;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.Test;

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.tuple;

class ApacheKafkaContainerClusterTest {

@Test
void testKafkaContainerCluster() throws Exception {
try (ApacheKafkaContainerCluster cluster = new ApacheKafkaContainerCluster("3.8.0", 3, 2)) {
cluster.start();
String bootstrapServers = cluster.getBootstrapServers();

assertThat(cluster.getBrokers()).hasSize(3);

testKafkaFunctionality(bootstrapServers, 3, 2);
}
}

protected void testKafkaFunctionality(String bootstrapServers, int partitions, int rf) throws Exception {
try (
AdminClient adminClient = AdminClient.create(
ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
);
KafkaProducer<String, String> producer = new KafkaProducer<>(
ImmutableMap.of(
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
bootstrapServers,
ProducerConfig.CLIENT_ID_CONFIG,
UUID.randomUUID().toString()
),
new StringSerializer(),
new StringSerializer()
);
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(
ImmutableMap.of(
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
bootstrapServers,
ConsumerConfig.GROUP_ID_CONFIG,
"tc-" + UUID.randomUUID(),
ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
"earliest"
),
new StringDeserializer(),
new StringDeserializer()
);
) {
String topicName = "messages";

Collection<NewTopic> topics = Collections.singletonList(new NewTopic(topicName, partitions, (short) rf));
adminClient.createTopics(topics).all().get(30, TimeUnit.SECONDS);

consumer.subscribe(Collections.singletonList(topicName));

producer.send(new ProducerRecord<>(topicName, "testcontainers", "rulezzz")).get();

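// The record may not arrive on the first poll() while the consumer group is still rebalancing, so retry via Awaitility.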
Awaitility
.await()
.atMost(Duration.ofSeconds(10))
.untilAsserted(() -> {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));

assertThat(records)
.hasSize(1)
.extracting(ConsumerRecord::topic, ConsumerRecord::key, ConsumerRecord::value)
.containsExactly(tuple(topicName, "testcontainers", "rulezzz"));
});

consumer.unsubscribe();
}
}
}
ConfluentKafkaContainerCluster.java
package com.example.kafkacluster;

import org.apache.kafka.common.Uuid;
import org.awaitility.Awaitility;
import org.testcontainers.containers.Container;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
import org.testcontainers.kafka.ConfluentKafkaContainer;
import org.testcontainers.lifecycle.Startable;
import org.testcontainers.utility.DockerImageName;

import java.time.Duration;
import java.util.Collection;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.assertj.core.api.Assertions.assertThat;

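/**
 * The same clustering approach as {@code ApacheKafkaContainerCluster}, built on the Confluent
 * Platform image via {@link ConfluentKafkaContainer}.
 */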
public class ConfluentKafkaContainerCluster implements Startable {

private final int brokersNum;

private final Network network;

private final Collection<ConfluentKafkaContainer> brokers;

public ConfluentKafkaContainerCluster(String confluentPlatformVersion, int brokersNum, int internalTopicsRf) {
if (brokersNum <= 0) {
throw new IllegalArgumentException("brokersNum '" + brokersNum + "' must be greater than 0");
}
if (internalTopicsRf <= 0 || internalTopicsRf > brokersNum) {
throw new IllegalArgumentException(
"internalTopicsRf '" + internalTopicsRf + "' must be greater than 0 and less than or equal to brokersNum"
);
}

this.brokersNum = brokersNum;
this.network = Network.newNetwork();

String controllerQuorumVoters = IntStream
.range(0, brokersNum)
.mapToObj(brokerNum -> String.format("%d@broker-%d:9094", brokerNum, brokerNum))
.collect(Collectors.joining(","));

String clusterId = Uuid.randomUuid().toString();

this.brokers =
IntStream
.range(0, brokersNum)
.mapToObj(brokerNum -> {
return new ConfluentKafkaContainer(
DockerImageName.parse("confluentinc/cp-kafka").withTag(confluentPlatformVersion)
)
.withNetwork(this.network)
.withNetworkAliases("broker-" + brokerNum)
.withEnv("CLUSTER_ID", clusterId)
.withEnv("KAFKA_BROKER_ID", brokerNum + "")
.withEnv("KAFKA_NODE_ID", brokerNum + "")
.withEnv("KAFKA_CONTROLLER_QUORUM_VOTERS", controllerQuorumVoters)
.withEnv("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", internalTopicsRf + "")
.withEnv("KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS", internalTopicsRf + "")
.withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", internalTopicsRf + "")
.withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", internalTopicsRf + "")
.withStartupTimeout(Duration.ofMinutes(1));
})
.collect(Collectors.toList());
}

public Collection<ConfluentKafkaContainer> getBrokers() {
return this.brokers;
}

public String getBootstrapServers() {
return brokers.stream().map(ConfluentKafkaContainer::getBootstrapServers).collect(Collectors.joining(","));
}

@Override
public void start() {
// Start all brokers in parallel: the KRaft controller quorum only forms once a majority of voters is running, so starting them one at a time would block
brokers.parallelStream().forEach(GenericContainer::start);

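// Wait until every broker has registered: kafka-metadata-shell exposes the __cluster_metadata log
// as a virtual tree with one entry under /brokers per registered broker.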
Awaitility
.await()
.atMost(Duration.ofSeconds(30))
.untilAsserted(() -> {
Container.ExecResult result =
this.brokers.stream()
.findFirst()
.get()
.execInContainer(
"sh",
"-c",
"kafka-metadata-shell --snapshot /var/lib/kafka/data/__cluster_metadata-0/00000000000000000000.log ls /brokers | wc -l"
);
String brokerCount = result.getStdout().replace("\n", "");

assertThat(brokerCount).asInt().isEqualTo(this.brokersNum);
});
}

@Override
public void stop() {
this.brokers.stream().parallel().forEach(GenericContainer::stop);
}
}
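
Usage of the Confluent variant mirrors ApacheKafkaContainerClusterTest above. A minimal sketch (the cp-kafka tag "7.7.0" is illustrative only, not pinned by this change); since Startable extends AutoCloseable, try-with-resources stops every broker on exit:

try (ConfluentKafkaContainerCluster cluster = new ConfluentKafkaContainerCluster("7.7.0", 3, 2)) {
    cluster.start();

    // The comma-separated endpoints can be handed to any Kafka client or AdminClient.
    String bootstrapServers = cluster.getBootstrapServers();
    assertThat(bootstrapServers).isNotBlank();
    assertThat(cluster.getBrokers()).hasSize(3);
}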