diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java index 9f1e8126e..c77cf355a 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java @@ -17,9 +17,10 @@ package org.apache.flink.connector.kafka.sink; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.io.IOException; @@ -29,12 +30,13 @@ * Tests for serializing and deserialzing {@link KafkaCommittable} with {@link * KafkaCommittableSerializer}. */ -public class KafkaCommittableSerializerTest extends TestLogger { +@ExtendWith({TestLoggerExtension.class}) +class KafkaCommittableSerializerTest { private static final KafkaCommittableSerializer SERIALIZER = new KafkaCommittableSerializer(); @Test - public void testCommittableSerDe() throws IOException { + void testCommittableSerDe() throws IOException { final String transactionalId = "test-id"; final short epoch = 5; final KafkaCommittable committable = new KafkaCommittable(1L, epoch, transactionalId, null); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommitterTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommitterTest.java index ea9d893ed..ef6b3068e 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommitterTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommitterTest.java @@ -36,7 +36,7 @@ /** Tests for {@link KafkaCommitter}. */ @ExtendWith({TestLoggerExtension.class}) -public class KafkaCommitterTest { +class KafkaCommitterTest { private static final int PRODUCER_ID = 0; private static final short EPOCH = 0; @@ -44,7 +44,7 @@ public class KafkaCommitterTest { /** Causes a network error by inactive broker and tests that a retry will happen. 
*/ @Test - public void testRetryCommittableOnRetriableError() throws IOException, InterruptedException { + void testRetryCommittableOnRetriableError() throws IOException, InterruptedException { Properties properties = getProperties(); try (final KafkaCommitter committer = new KafkaCommitter(properties); FlinkKafkaInternalProducer producer = @@ -66,7 +66,7 @@ public void testRetryCommittableOnRetriableError() throws IOException, Interrupt } @Test - public void testFailJobOnUnknownFatalError() throws IOException, InterruptedException { + void testFailJobOnUnknownFatalError() throws IOException, InterruptedException { Properties properties = getProperties(); try (final KafkaCommitter committer = new KafkaCommitter(properties); FlinkKafkaInternalProducer producer = @@ -87,7 +87,7 @@ public void testFailJobOnUnknownFatalError() throws IOException, InterruptedExce } @Test - public void testKafkaCommitterClosesProducer() throws IOException, InterruptedException { + void testKafkaCommitterClosesProducer() throws IOException, InterruptedException { Properties properties = getProperties(); FlinkKafkaInternalProducer producer = new FlinkKafkaInternalProducer(properties, TRANSACTIONAL_ID) { diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java index 701f9c8aa..c51aaf7a4 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java @@ -21,7 +21,7 @@ import org.apache.flink.api.common.serialization.SimpleStringSchema; import org.apache.flink.connector.testutils.formats.DummyInitializationContext; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.Configurable; @@ -31,8 +31,9 @@ import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -47,7 +48,8 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link KafkaRecordSerializationSchemaBuilder}. 
*/ -public class KafkaRecordSerializationSchemaBuilderTest extends TestLogger { +@ExtendWith({TestLoggerExtension.class}) +class KafkaRecordSerializationSchemaBuilderTest { private static final String DEFAULT_TOPIC = "test"; @@ -55,25 +57,25 @@ public class KafkaRecordSerializationSchemaBuilderTest extends TestLogger { private static Map configuration; private static boolean isKeySerializer; - @Before - public void setUp() { + @BeforeEach + void setUp() { configurableConfiguration = new HashMap<>(); configuration = new HashMap<>(); isKeySerializer = false; } @Test - public void testDoNotAllowMultipleKeySerializer() { + void testDoNotAllowMultipleKeySerializer() { assertOnlyOneSerializerAllowed(keySerializationSetter()); } @Test - public void testDoNotAllowMultipleValueSerializer() { + void testDoNotAllowMultipleValueSerializer() { assertOnlyOneSerializerAllowed(valueSerializationSetter()); } @Test - public void testDoNotAllowMultipleTopicSelector() { + void testDoNotAllowMultipleTopicSelector() { assertThatThrownBy( () -> KafkaRecordSerializationSchema.builder() @@ -89,7 +91,7 @@ public void testDoNotAllowMultipleTopicSelector() { } @Test - public void testExpectTopicSelector() { + void testExpectTopicSelector() { assertThatThrownBy( KafkaRecordSerializationSchema.builder() .setValueSerializationSchema(new SimpleStringSchema()) @@ -98,13 +100,13 @@ public void testExpectTopicSelector() { } @Test - public void testExpectValueSerializer() { + void testExpectValueSerializer() { assertThatThrownBy(KafkaRecordSerializationSchema.builder().setTopic(DEFAULT_TOPIC)::build) .isInstanceOf(IllegalStateException.class); } @Test - public void testSerializeRecordWithTopicSelector() { + void testSerializeRecordWithTopicSelector() { final TopicSelector topicSelector = (e) -> { if (e.equals("a")) { @@ -129,7 +131,7 @@ public void testSerializeRecordWithTopicSelector() { } @Test - public void testSerializeRecordWithPartitioner() throws Exception { + void testSerializeRecordWithPartitioner() throws Exception { AtomicBoolean opened = new AtomicBoolean(false); final int partition = 5; final FlinkKafkaPartitioner partitioner = @@ -148,7 +150,7 @@ public void testSerializeRecordWithPartitioner() throws Exception { } @Test - public void testSerializeRecordWithHeaderProvider() throws Exception { + void testSerializeRecordWithHeaderProvider() throws Exception { final HeaderProvider headerProvider = (ignored) -> new RecordHeaders( @@ -169,7 +171,7 @@ public void testSerializeRecordWithHeaderProvider() throws Exception { } @Test - public void testSerializeRecordWithKey() { + void testSerializeRecordWithKey() { final SerializationSchema serializationSchema = new SimpleStringSchema(); final KafkaRecordSerializationSchema schema = KafkaRecordSerializationSchema.builder() @@ -184,7 +186,7 @@ public void testSerializeRecordWithKey() { } @Test - public void testKafkaKeySerializerWrapperWithoutConfigurable() throws Exception { + void testKafkaKeySerializerWrapperWithoutConfigurable() throws Exception { final Map config = Collections.singletonMap("simpleKey", "simpleValue"); final KafkaRecordSerializationSchema schema = KafkaRecordSerializationSchema.builder() @@ -201,7 +203,7 @@ public void testKafkaKeySerializerWrapperWithoutConfigurable() throws Exception } @Test - public void testKafkaValueSerializerWrapperWithoutConfigurable() throws Exception { + void testKafkaValueSerializerWrapperWithoutConfigurable() throws Exception { final Map config = Collections.singletonMap("simpleKey", "simpleValue"); final 
KafkaRecordSerializationSchema schema = KafkaRecordSerializationSchema.builder() @@ -215,7 +217,7 @@ public void testKafkaValueSerializerWrapperWithoutConfigurable() throws Exceptio } @Test - public void testSerializeRecordWithKafkaSerializer() throws Exception { + void testSerializeRecordWithKafkaSerializer() throws Exception { final Map config = Collections.singletonMap("configKey", "configValue"); final KafkaRecordSerializationSchema schema = KafkaRecordSerializationSchema.builder() @@ -231,7 +233,7 @@ public void testSerializeRecordWithKafkaSerializer() throws Exception { } @Test - public void testSerializeRecordWithTimestamp() { + void testSerializeRecordWithTimestamp() { final SerializationSchema serializationSchema = new SimpleStringSchema(); final KafkaRecordSerializationSchema schema = KafkaRecordSerializationSchema.builder() diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java index eeecc84df..70f4c3474 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java @@ -41,7 +41,7 @@ public class KafkaSinkBuilderTest extends TestLogger { }; @Test - public void testPropertyHandling() { + void testPropertyHandling() { validateProducerConfig( getBasicBuilder(), p -> { @@ -78,7 +78,7 @@ public void testPropertyHandling() { } @Test - public void testBootstrapServerSetting() { + void testBootstrapServerSetting() { Properties testConf1 = new Properties(); testConf1.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "testServer"); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java index 9cc80518a..9484cae7a 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java @@ -32,7 +32,6 @@ import org.apache.flink.configuration.StateBackendOptions; import org.apache.flink.connector.base.DeliveryGuarantee; import org.apache.flink.connector.kafka.sink.testutils.KafkaSinkExternalContextFactory; -import org.apache.flink.connector.kafka.testutils.DockerImageVersions; import org.apache.flink.connector.kafka.testutils.KafkaUtil; import org.apache.flink.connector.testframe.environment.MiniClusterTestEnvironment; import org.apache.flink.connector.testframe.external.DefaultContainerizedExternalSystem; @@ -58,7 +57,9 @@ import org.apache.flink.test.util.TestUtils; import org.apache.flink.testutils.junit.SharedObjects; import org.apache.flink.testutils.junit.SharedReference; -import org.apache.flink.util.TestLogger; +import org.apache.flink.testutils.junit.utils.TempDirUtils; +import org.apache.flink.util.DockerImageVersions; +import org.apache.flink.util.TestLoggerExtension; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.AdminClient; @@ -66,19 +67,20 @@ import org.apache.kafka.clients.admin.DeleteTopicsResult; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import 
org.junit.Rule; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.containers.KafkaContainer; import org.testcontainers.containers.Network; +import org.testcontainers.junit.jupiter.Container; import org.testcontainers.utility.DockerImageName; import javax.annotation.Nullable; @@ -86,6 +88,7 @@ import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -101,13 +104,14 @@ import java.util.stream.Collectors; import java.util.stream.LongStream; -import static org.apache.flink.connector.kafka.testutils.DockerImageVersions.KAFKA; import static org.apache.flink.connector.kafka.testutils.KafkaUtil.createKafkaContainer; +import static org.apache.flink.util.DockerImageVersions.KAFKA; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; /** Tests for using KafkaSink writing to a Kafka cluster. */ -public class KafkaSinkITCase extends TestLogger { +@ExtendWith({TestLoggerExtension.class}) +class KafkaSinkITCase { private static final Logger LOG = LoggerFactory.getLogger(KafkaSinkITCase.class); private static final String INTER_CONTAINER_KAFKA_ALIAS = "kafka"; @@ -122,19 +126,19 @@ public class KafkaSinkITCase extends TestLogger { private SharedReference failed; private SharedReference lastCheckpointedRecord; - @ClassRule + @Container public static final KafkaContainer KAFKA_CONTAINER = createKafkaContainer(KAFKA, LOG) .withEmbeddedZookeeper() .withNetwork(NETWORK) .withNetworkAliases(INTER_CONTAINER_KAFKA_ALIAS); - @Rule public final SharedObjects sharedObjects = SharedObjects.create(); + @RegisterExtension public final SharedObjects sharedObjects = SharedObjects.create(); - @Rule public final TemporaryFolder temp = new TemporaryFolder(); + @TempDir public Path temp; - @BeforeClass - public static void setupAdmin() { + @BeforeAll + static void setupAdmin() { Map properties = new HashMap<>(); properties.put( CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, @@ -142,12 +146,12 @@ public static void setupAdmin() { admin = AdminClient.create(properties); } - @AfterClass - public static void teardownAdmin() { + @AfterAll + static void teardownAdmin() { admin.close(); } - @Before + @BeforeEach public void setUp() throws ExecutionException, InterruptedException, TimeoutException { emittedRecordsCount = sharedObjects.add(new AtomicLong()); emittedRecordsWithCheckpoint = sharedObjects.add(new AtomicLong()); @@ -157,8 +161,8 @@ public void setUp() throws ExecutionException, InterruptedException, TimeoutExce createTestTopic(topic, 1, TOPIC_REPLICATION_FACTOR); } - @After - public void tearDown() throws ExecutionException, InterruptedException, TimeoutException { + @AfterEach + void tearDown() throws ExecutionException, InterruptedException, TimeoutException { deleteTestTopic(topic); } @@ -193,22 +197,22 @@ class IntegrationTests extends SinkTestSuiteBase { } @Test - public void testWriteRecordsToKafkaWithAtLeastOnceGuarantee() throws Exception { + void 
testWriteRecordsToKafkaWithAtLeastOnceGuarantee() throws Exception { writeRecordsToKafka(DeliveryGuarantee.AT_LEAST_ONCE, emittedRecordsCount); } @Test - public void testWriteRecordsToKafkaWithNoneGuarantee() throws Exception { + void testWriteRecordsToKafkaWithNoneGuarantee() throws Exception { writeRecordsToKafka(DeliveryGuarantee.NONE, emittedRecordsCount); } @Test - public void testWriteRecordsToKafkaWithExactlyOnceGuarantee() throws Exception { + void testWriteRecordsToKafkaWithExactlyOnceGuarantee() throws Exception { writeRecordsToKafka(DeliveryGuarantee.EXACTLY_ONCE, emittedRecordsWithCheckpoint); } @Test - public void testRecoveryWithAtLeastOnceGuarantee() throws Exception { + void testRecoveryWithAtLeastOnceGuarantee() throws Exception { testRecoveryWithAssertion( DeliveryGuarantee.AT_LEAST_ONCE, 1, @@ -216,7 +220,7 @@ public void testRecoveryWithAtLeastOnceGuarantee() throws Exception { } @Test - public void testRecoveryWithExactlyOnceGuarantee() throws Exception { + void testRecoveryWithExactlyOnceGuarantee() throws Exception { testRecoveryWithAssertion( DeliveryGuarantee.EXACTLY_ONCE, 1, @@ -229,7 +233,7 @@ public void testRecoveryWithExactlyOnceGuarantee() throws Exception { } @Test - public void testRecoveryWithExactlyOnceGuaranteeAndConcurrentCheckpoints() throws Exception { + void testRecoveryWithExactlyOnceGuaranteeAndConcurrentCheckpoints() throws Exception { testRecoveryWithAssertion( DeliveryGuarantee.EXACTLY_ONCE, 2, @@ -242,12 +246,12 @@ public void testRecoveryWithExactlyOnceGuaranteeAndConcurrentCheckpoints() throw } @Test - public void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Exception { + void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Exception { // Run a first job failing during the async phase of a checkpoint to leave some // lingering transactions final Configuration config = new Configuration(); config.setString(StateBackendOptions.STATE_BACKEND, "filesystem"); - final File checkpointDir = temp.newFolder(); + final File checkpointDir = TempDirUtils.newFolder(temp); config.setString( CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir.toURI().toString()); config.set( @@ -279,7 +283,7 @@ public void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Excep } @Test - public void testAbortTransactionsAfterScaleInBeforeFirstCheckpoint() throws Exception { + void testAbortTransactionsAfterScaleInBeforeFirstCheckpoint() throws Exception { // Run a first job opening 5 transactions one per subtask and fail in async checkpoint phase final Configuration config = new Configuration(); config.set(CoreOptions.DEFAULT_PARALLELISM, 5); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java index 1497c9bb9..97e5a2ee4 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java @@ -18,7 +18,7 @@ package org.apache.flink.connector.kafka.sink; import org.apache.flink.connector.kafka.sink.KafkaTransactionLog.TransactionRecord; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.Producer; @@ -26,12 +26,13 @@ import 
org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.ByteArraySerializer; import org.apache.kafka.common.serialization.IntegerSerializer; -import org.junit.After; -import org.junit.ClassRule; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.junit.jupiter.Container; import java.util.ArrayList; import java.util.List; @@ -49,25 +50,26 @@ import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link KafkaTransactionLog} to retrieve abortable Kafka transactions. */ -public class KafkaTransactionLogITCase extends TestLogger { +@ExtendWith({TestLoggerExtension.class}) +class KafkaTransactionLogITCase { private static final Logger LOG = LoggerFactory.getLogger(KafkaSinkITCase.class); private static final String TOPIC_NAME = "kafkaTransactionLogTest"; private static final String TRANSACTIONAL_ID_PREFIX = "kafka-log"; - @ClassRule + @Container public static final KafkaContainer KAFKA_CONTAINER = createKafkaContainer(KAFKA, LOG).withEmbeddedZookeeper(); private final List> openProducers = new ArrayList<>(); - @After - public void tearDown() { + @AfterEach + void tearDown() { openProducers.forEach(Producer::close); } @Test - public void testGetTransactions() { + void testGetTransactions() { committedTransaction(1); abortedTransaction(2); lingeringTransaction(3); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java index c9eceb982..616d85a92 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java @@ -78,7 +78,7 @@ /** Tests for the standalone KafkaWriter. 
*/ @ExtendWith(TestLoggerExtension.class) -public class KafkaWriterITCase { +class KafkaWriterITCase { private static final Logger LOG = LoggerFactory.getLogger(KafkaWriterITCase.class); private static final String INTER_CONTAINER_KAFKA_ALIAS = "kafka"; @@ -97,17 +97,17 @@ public class KafkaWriterITCase { .withNetworkAliases(INTER_CONTAINER_KAFKA_ALIAS); @BeforeAll - public static void beforeAll() { + static void beforeAll() { KAFKA_CONTAINER.start(); } @AfterAll - public static void afterAll() { + static void afterAll() { KAFKA_CONTAINER.stop(); } @BeforeEach - public void setUp(TestInfo testInfo) { + void setUp(TestInfo testInfo) { metricListener = new MetricListener(); timeService = new TriggerTimeService(); topic = testInfo.getDisplayName().replaceAll("\\W", ""); @@ -118,19 +118,19 @@ public void setUp(TestInfo testInfo) { public void testRegisterMetrics(DeliveryGuarantee guarantee) throws Exception { try (final KafkaWriter ignored = createWriterWithConfiguration(getKafkaClientConfiguration(), guarantee)) { - assertThat(metricListener.getGauge(KAFKA_METRIC_WITH_GROUP_NAME).isPresent()).isTrue(); + assertThat(metricListener.getGauge(KAFKA_METRIC_WITH_GROUP_NAME)).isPresent(); } } @ParameterizedTest @EnumSource(DeliveryGuarantee.class) - public void testNotRegisterMetrics(DeliveryGuarantee guarantee) throws Exception { + void testNotRegisterMetrics(DeliveryGuarantee guarantee) throws Exception { assertKafkaMetricNotPresent(guarantee, "flink.disable-metrics", "true"); assertKafkaMetricNotPresent(guarantee, "register.producer.metrics", "false"); } @Test - public void testIncreasingRecordBasedCounters() throws Exception { + void testIncreasingRecordBasedCounters() throws Exception { final SinkWriterMetricGroup metricGroup = createSinkWriterMetricGroup(); try (final KafkaWriter writer = @@ -140,26 +140,26 @@ public void testIncreasingRecordBasedCounters() throws Exception { final Counter numRecordsOut = metricGroup.getIOMetricGroup().getNumRecordsOutCounter(); final Counter numRecordsOutErrors = metricGroup.getNumRecordsOutErrorsCounter(); final Counter numRecordsSendErrors = metricGroup.getNumRecordsSendErrorsCounter(); - assertThat(numBytesOut.getCount()).isEqualTo(0L); - assertThat(numRecordsOut.getCount()).isEqualTo(0); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0); - assertThat(numRecordsSendErrors.getCount()).isEqualTo(0); + assertThat(numBytesOut.getCount()).isZero(); + assertThat(numRecordsOut.getCount()).isZero(); + assertThat(numRecordsOutErrors.getCount()).isZero(); + assertThat(numRecordsSendErrors.getCount()).isZero(); // elements for which the serializer returns null should be silently skipped writer.write(null, SINK_WRITER_CONTEXT); timeService.trigger(); - assertThat(numBytesOut.getCount()).isEqualTo(0L); - assertThat(numRecordsOut.getCount()).isEqualTo(0); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0); - assertThat(numRecordsSendErrors.getCount()).isEqualTo(0); + assertThat(numBytesOut.getCount()).isZero(); + assertThat(numRecordsOut.getCount()).isZero(); + assertThat(numRecordsOutErrors.getCount()).isZero(); + assertThat(numRecordsSendErrors.getCount()).isZero(); // but elements for which a non-null producer record is returned should count writer.write(1, SINK_WRITER_CONTEXT); timeService.trigger(); assertThat(numRecordsOut.getCount()).isEqualTo(1); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0); - assertThat(numRecordsSendErrors.getCount()).isEqualTo(0); - assertThat(numBytesOut.getCount()).isGreaterThan(0L); + 
assertThat(numRecordsOutErrors.getCount()).isZero(); + assertThat(numRecordsSendErrors.getCount()).isZero(); + assertThat(numBytesOut.getCount()).isPositive(); } } @@ -170,8 +170,8 @@ public void testCurrentSendTimeMetric() throws Exception { getKafkaClientConfiguration(), DeliveryGuarantee.AT_LEAST_ONCE)) { final Optional> currentSendTime = metricListener.getGauge("currentSendTime"); - assertThat(currentSendTime.isPresent()).isTrue(); - assertThat(currentSendTime.get().getValue()).isEqualTo(0L); + assertThat(currentSendTime).isPresent(); + assertThat(currentSendTime.get().getValue()).isZero(); IntStream.range(0, 100) .forEach( (run) -> { @@ -185,7 +185,7 @@ public void testCurrentSendTimeMetric() throws Exception { throw new RuntimeException("Failed writing Kafka record."); } }); - assertThat(currentSendTime.get().getValue()).isGreaterThan(0L); + assertThat(currentSendTime.get().getValue()).isPositive(); } } @@ -199,7 +199,7 @@ void testFlushAsyncErrorPropagationAndErrorCounter() throws Exception { createWriterWithConfiguration( properties, DeliveryGuarantee.EXACTLY_ONCE, metricGroup); final Counter numRecordsOutErrors = metricGroup.getNumRecordsOutErrorsCounter(); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0L); + assertThat(numRecordsOutErrors.getCount()).isZero(); triggerProducerException(writer, properties); @@ -224,7 +224,7 @@ void testWriteAsyncErrorPropagationAndErrorCounter() throws Exception { createWriterWithConfiguration( properties, DeliveryGuarantee.EXACTLY_ONCE, metricGroup); final Counter numRecordsOutErrors = metricGroup.getNumRecordsOutErrorsCounter(); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0L); + assertThat(numRecordsOutErrors.getCount()).isZero(); triggerProducerException(writer, properties); // to ensure that the exceptional send request has completed @@ -252,7 +252,7 @@ void testMailboxAsyncErrorPropagationAndErrorCounter() throws Exception { properties, DeliveryGuarantee.EXACTLY_ONCE, sinkInitContext); final Counter numRecordsOutErrors = sinkInitContext.metricGroup.getNumRecordsOutErrorsCounter(); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0L); + assertThat(numRecordsOutErrors.getCount()).isZero(); triggerProducerException(writer, properties); // to ensure that the exceptional send request has completed @@ -279,7 +279,7 @@ void testCloseAsyncErrorPropagationAndErrorCounter() throws Exception { createWriterWithConfiguration( properties, DeliveryGuarantee.EXACTLY_ONCE, metricGroup); final Counter numRecordsOutErrors = metricGroup.getNumRecordsOutErrorsCounter(); - assertThat(numRecordsOutErrors.getCount()).isEqualTo(0L); + assertThat(numRecordsOutErrors.getCount()).isZero(); triggerProducerException(writer, properties); // to ensure that the exceptional send request has completed @@ -308,7 +308,7 @@ private void triggerProducerException(KafkaWriter writer, Properties pr } @Test - public void testMetadataPublisher() throws Exception { + void testMetadataPublisher() throws Exception { List metadataList = new ArrayList<>(); try (final KafkaWriter writer = createWriterWithConfiguration( @@ -351,7 +351,7 @@ void testLingeringTransaction() throws Exception { recoveredWriter.snapshotState(1); assertThat(committables).hasSize(1); final KafkaCommittable committable = committables.stream().findFirst().get(); - assertThat(committable.getProducer().isPresent()).isTrue(); + assertThat(committable.getProducer()).isPresent(); committable.getProducer().get().getObject().commitTransaction(); @@ -372,18 +372,18 @@ void testLingeringTransaction() 
throws Exception { void useSameProducerForNonTransactional(DeliveryGuarantee guarantee) throws Exception { try (final KafkaWriter writer = createWriterWithConfiguration(getKafkaClientConfiguration(), guarantee)) { - assertThat(writer.getProducerPool()).hasSize(0); + assertThat(writer.getProducerPool()).isEmpty(); FlinkKafkaInternalProducer firstProducer = writer.getCurrentProducer(); writer.flush(false); Collection committables = writer.prepareCommit(); writer.snapshotState(0); - assertThat(committables).hasSize(0); + assertThat(committables).isEmpty(); - assertThat(writer.getCurrentProducer() == firstProducer) + assertThat(writer.getCurrentProducer()) .as("Expected same producer") - .isTrue(); - assertThat(writer.getProducerPool()).hasSize(0); + .isSameAs(firstProducer); + assertThat(writer.getProducerPool()).isEmpty(); } } @@ -401,16 +401,16 @@ void usePoolForTransactional() throws Exception { writer.snapshotState(1); assertThat(committables0).hasSize(1); final KafkaCommittable committable = committables0.stream().findFirst().get(); - assertThat(committable.getProducer().isPresent()).isTrue(); + assertThat(committable.getProducer()).isPresent(); FlinkKafkaInternalProducer firstProducer = committable.getProducer().get().getObject(); assertThat(firstProducer != writer.getCurrentProducer()) .as("Expected different producer") - .isTrue(); + .isNotSameAs(writer.getCurrentProducer()); // recycle first producer, KafkaCommitter would commit it and then return it - assertThat(writer.getProducerPool()).hasSize(0); + assertThat(writer.getProducerPool()).isEmpty(); firstProducer.commitTransaction(); committable.getProducer().get().close(); assertThat(writer.getProducerPool()).hasSize(1); @@ -421,11 +421,11 @@ void usePoolForTransactional() throws Exception { writer.snapshotState(2); assertThat(committables1).hasSize(1); final KafkaCommittable committable1 = committables1.stream().findFirst().get(); - assertThat(committable1.getProducer().isPresent()).isTrue(); + assertThat(committable1.getProducer()).isPresent(); - assertThat(firstProducer == writer.getCurrentProducer()) + assertThat(firstProducer) .as("Expected recycled producer") - .isTrue(); + .isSameAs(writer.getCurrentProducer()); } } @@ -439,13 +439,13 @@ void prepareCommitForEmptyTransaction() throws Exception { try (final KafkaWriter writer = createWriterWithConfiguration( getKafkaClientConfiguration(), DeliveryGuarantee.EXACTLY_ONCE)) { - assertThat(writer.getProducerPool()).hasSize(0); + assertThat(writer.getProducerPool()).isEmpty(); // no data written to current transaction writer.flush(false); Collection emptyCommittables = writer.prepareCommit(); - assertThat(emptyCommittables).hasSize(0); + assertThat(emptyCommittables).isEmpty(); assertThat(writer.getProducerPool()).hasSize(1); final FlinkKafkaInternalProducer recycledProducer = writer.getProducerPool().pop(); @@ -463,7 +463,7 @@ void testAbortOnClose() throws Exception { try (final KafkaWriter writer = createWriterWithConfiguration(properties, DeliveryGuarantee.EXACTLY_ONCE)) { writer.write(1, SINK_WRITER_CONTEXT); - assertThat(drainAllRecordsFromTopic(topic, properties, true)).hasSize(0); + assertThat(drainAllRecordsFromTopic(topic, properties, true)).isEmpty(); } try (final KafkaWriter writer = diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java index 3df0ea88c..d8c5549b1 100644 --- 
a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java @@ -17,24 +17,25 @@ package org.apache.flink.connector.kafka.sink; -import org.apache.flink.util.TestLogger; - -import org.junit.Test; +import org.apache.flink.util.TestLoggerExtension; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.io.IOException; import static org.assertj.core.api.Assertions.assertThat; /** - * Tests for serializing and deserialzing {@link KafkaWriterState} with {@link + * Tests for serializing and deserializing {@link KafkaWriterState} with {@link * KafkaWriterStateSerializer}. */ -public class KafkaWriterStateSerializerTest extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +class KafkaWriterStateSerializerTest { private static final KafkaWriterStateSerializer SERIALIZER = new KafkaWriterStateSerializer(); @Test - public void testStateSerDe() throws IOException { + void testStateSerDe() throws IOException { final KafkaWriterState state = new KafkaWriterState("idPrefix"); final byte[] serialized = SERIALIZER.serialize(state); assertThat(SERIALIZER.deserialize(1, serialized)).isEqualTo(state); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java index 982d58e33..05d1277d5 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java @@ -17,17 +17,19 @@ package org.apache.flink.connector.kafka.sink; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link TransactionalIdFactory}. */ -public class TransactionIdFactoryTest extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +class TransactionIdFactoryTest { @Test - public void testBuildTransactionalId() { + void testBuildTransactionalId() { final String expected = "prefix-0-2"; assertThat(TransactionalIdFactory.buildTransactionalId("prefix", 0, 2L)) .isEqualTo(expected); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java index 897a85911..ccf00f2c1 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java @@ -17,9 +17,9 @@ package org.apache.flink.connector.kafka.sink; -import org.apache.flink.util.TestLogger; - -import org.junit.Test; +import org.apache.flink.util.TestLoggerExtension; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.util.Collections; import java.util.HashMap; @@ -29,13 +29,14 @@ import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link TransactionsToAbortChecker}. 
*/ -public class TransactionToAbortCheckerTest extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +class TransactionToAbortCheckerTest { public static final String ABORT = "abort"; public static final String KEEP = "keep"; @Test - public void testMustAbortTransactionsWithSameSubtaskIdAndHigherCheckpointOffset() { + void testMustAbortTransactionsWithSameSubtaskIdAndHigherCheckpointOffset() { Map offsetMapping = new HashMap<>(2); offsetMapping.put(0, 1L); offsetMapping.put(2, 3L); @@ -63,7 +64,7 @@ public void testMustAbortTransactionsWithSameSubtaskIdAndHigherCheckpointOffset( } @Test - public void testMustAbortTransactionsIfLowestCheckpointOffsetIsMinimumOffset() { + void testMustAbortTransactionsIfLowestCheckpointOffsetIsMinimumOffset() { final TransactionsToAbortChecker checker = new TransactionsToAbortChecker(2, Collections.singletonMap(0, 1L), 0); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java index 236e9618f..64bacf28a 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java @@ -42,10 +42,10 @@ /** Tests for {@link KafkaSourceBuilder}. */ @ExtendWith(TestLoggerExtension.class) -public class KafkaSourceBuilderTest { +class KafkaSourceBuilderTest { @Test - public void testBuildSourceWithGroupId() { + void testBuildSourceWithGroupId() { final KafkaSource kafkaSource = getBasicBuilder().setGroupId("groupId").build(); // Commit on checkpoint should be enabled by default assertThat( @@ -65,7 +65,7 @@ public void testBuildSourceWithGroupId() { } @Test - public void testBuildSourceWithoutGroupId() { + void testBuildSourceWithoutGroupId() { final KafkaSource kafkaSource = getBasicBuilder().build(); // Commit on checkpoint and auto commit should be disabled because group.id is not specified assertThat( @@ -84,7 +84,7 @@ public void testBuildSourceWithoutGroupId() { } @Test - public void testEnableCommitOnCheckpointWithoutGroupId() { + void testEnableCommitOnCheckpointWithoutGroupId() { assertThatThrownBy( () -> getBasicBuilder() @@ -99,7 +99,7 @@ public void testEnableCommitOnCheckpointWithoutGroupId() { } @Test - public void testEnableAutoCommitWithoutGroupId() { + void testEnableAutoCommitWithoutGroupId() { assertThatThrownBy( () -> getBasicBuilder() @@ -112,7 +112,7 @@ public void testEnableAutoCommitWithoutGroupId() { } @Test - public void testDisableOffsetCommitWithoutGroupId() { + void testDisableOffsetCommitWithoutGroupId() { getBasicBuilder() .setProperty(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT.key(), "false") .build(); @@ -120,7 +120,7 @@ public void testDisableOffsetCommitWithoutGroupId() { } @Test - public void testUsingCommittedOffsetsInitializerWithoutGroupId() { + void testUsingCommittedOffsetsInitializerWithoutGroupId() { // Using OffsetsInitializer#committedOffsets as starting offsets assertThatThrownBy( () -> @@ -158,7 +158,7 @@ public void testUsingCommittedOffsetsInitializerWithoutGroupId() { } @Test - public void testSettingCustomKafkaSubscriber() { + void testSettingCustomKafkaSubscriber() { ExampleCustomSubscriber exampleCustomSubscriber = new ExampleCustomSubscriber(); KafkaSourceBuilder customKafkaSubscriberBuilder = new KafkaSourceBuilder() diff --git 
a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java index 38ef80d51..277d38c9c 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java @@ -83,7 +83,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** Unite test class for {@link KafkaSource}. */ -public class KafkaSourceITCase { +class KafkaSourceITCase { private static final String TOPIC1 = "topic1"; private static final String TOPIC2 = "topic2"; @@ -167,8 +167,8 @@ public void testBasicRead(boolean enableObjectReuse) throws Exception { executeAndVerify(env, stream); } - @Test - public void testValueOnlyDeserializer() throws Exception { + @Test + void testValueOnlyDeserializer() throws Exception { KafkaSource source = KafkaSource.builder() .setBootstrapServers(KafkaSourceTestEnv.brokerConnectionStrings) @@ -264,8 +264,8 @@ public void testBasicReadWithoutGroupId(boolean enableObjectReuse) throws Except executeAndVerify(env, stream); } - @Test - public void testPerPartitionWatermark() throws Throwable { + @Test + void testPerPartitionWatermark() throws Throwable { String watermarkTopic = "watermarkTestTopic-" + UUID.randomUUID(); KafkaSourceTestEnv.createTestTopic(watermarkTopic, 2, 1); List> records = @@ -311,8 +311,8 @@ public void processElement( env.execute(); } - @Test - public void testConsumingEmptyTopic() throws Throwable { + @Test + void testConsumingEmptyTopic() throws Throwable { String emptyTopic = "emptyTopic-" + UUID.randomUUID(); KafkaSourceTestEnv.createTestTopic(emptyTopic, 3, 1); KafkaSource source = @@ -336,8 +336,8 @@ public void testConsumingEmptyTopic() throws Throwable { } } - @Test - public void testConsumingTopicWithEmptyPartitions() throws Throwable { + @Test + void testConsumingTopicWithEmptyPartitions() throws Throwable { String topicWithEmptyPartitions = "topicWithEmptyPartitions-" + UUID.randomUUID(); KafkaSourceTestEnv.createTestTopic( topicWithEmptyPartitions, KafkaSourceTestEnv.NUM_PARTITIONS, 1); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceLegacyITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceLegacyITCase.java index 5cc0ddf63..015800df6 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceLegacyITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceLegacyITCase.java @@ -23,75 +23,75 @@ import org.apache.flink.streaming.connectors.kafka.KafkaProducerTestBase; import org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; /** * An IT case class that runs all the IT cases of the legacy {@link * org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer} with the new {@link KafkaSource}. 
*/ -public class KafkaSourceLegacyITCase extends KafkaConsumerTestBase { +class KafkaSourceLegacyITCase extends KafkaConsumerTestBase { public KafkaSourceLegacyITCase() throws Exception { super(true); } - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { KafkaProducerTestBase.prepare(); ((KafkaTestEnvironmentImpl) kafkaServer) .setProducerSemantic(FlinkKafkaProducer.Semantic.AT_LEAST_ONCE); } @Test - public void testFailOnNoBroker() throws Exception { + void testFailOnNoBroker() throws Exception { runFailOnNoBrokerTest(); } @Test - public void testConcurrentProducerConsumerTopology() throws Exception { + void testConcurrentProducerConsumerTopology() throws Exception { runSimpleConcurrentProducerConsumerTopology(); } @Test - public void testKeyValueSupport() throws Exception { + void testKeyValueSupport() throws Exception { runKeyValueTest(); } // --- canceling / failures --- @Test - public void testCancelingEmptyTopic() throws Exception { + void testCancelingEmptyTopic() throws Exception { runCancelingOnEmptyInputTest(); } @Test - public void testCancelingFullTopic() throws Exception { + void testCancelingFullTopic() throws Exception { runCancelingOnFullInputTest(); } // --- source to partition mappings and exactly once --- @Test - public void testOneToOneSources() throws Exception { + void testOneToOneSources() throws Exception { runOneToOneExactlyOnceTest(); } @Test - public void testOneSourceMultiplePartitions() throws Exception { + void testOneSourceMultiplePartitions() throws Exception { runOneSourceMultiplePartitionsExactlyOnceTest(); } @Test - public void testMultipleSourcesOnePartition() throws Exception { + void testMultipleSourcesOnePartition() throws Exception { runMultipleSourcesOnePartitionExactlyOnceTest(); } // --- broker failure --- @Test - @Ignore("FLINK-28267") + @Disabled("FLINK-28267") public void testBrokerFailure() throws Exception { runBrokerFailureTest(); } @@ -99,66 +99,66 @@ public void testBrokerFailure() throws Exception { // --- special executions --- @Test - public void testBigRecordJob() throws Exception { + void testBigRecordJob() throws Exception { runBigRecordTestTopology(); } @Test - public void testMultipleTopicsWithLegacySerializer() throws Exception { + void testMultipleTopicsWithLegacySerializer() throws Exception { runProduceConsumeMultipleTopics(true); } @Test - public void testMultipleTopicsWithKafkaSerializer() throws Exception { + void testMultipleTopicsWithKafkaSerializer() throws Exception { runProduceConsumeMultipleTopics(false); } @Test - public void testAllDeletes() throws Exception { + void testAllDeletes() throws Exception { runAllDeletesTest(); } // --- startup mode --- @Test - public void testStartFromEarliestOffsets() throws Exception { + void testStartFromEarliestOffsets() throws Exception { runStartFromEarliestOffsets(); } @Test - public void testStartFromLatestOffsets() throws Exception { + void testStartFromLatestOffsets() throws Exception { runStartFromLatestOffsets(); } @Test - public void testStartFromGroupOffsets() throws Exception { + void testStartFromGroupOffsets() throws Exception { runStartFromGroupOffsets(); } @Test - public void testStartFromSpecificOffsets() throws Exception { + void testStartFromSpecificOffsets() throws Exception { runStartFromSpecificOffsets(); } @Test - public void testStartFromTimestamp() throws Exception { + void testStartFromTimestamp() throws Exception { runStartFromTimestamp(); } // --- offset committing --- @Test - 
public void testCommitOffsetsToKafka() throws Exception { + void testCommitOffsetsToKafka() throws Exception { runCommitOffsetsToKafka(); } @Test - public void testAutoOffsetRetrievalAndCommitToKafka() throws Exception { + void testAutoOffsetRetrievalAndCommitToKafka() throws Exception { runAutoOffsetRetrievalAndCommitToKafka(); } @Test - public void testCollectingSchema() throws Exception { + void testCollectingSchema() throws Exception { runCollectingSchemaTest(); } } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java index 8b308af16..4d7eb3282 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java @@ -35,9 +35,10 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.StringDeserializer; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.util.ArrayList; import java.util.Arrays; @@ -50,13 +51,14 @@ import java.util.Properties; import java.util.Set; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import java.util.stream.Collectors; import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for {@link KafkaSourceEnumerator}. */ -public class KafkaEnumeratorTest { +class KafkaEnumeratorTest { private static final int NUM_SUBTASKS = 3; private static final String DYNAMIC_TOPIC_NAME = "dynamic_topic"; private static final int NUM_PARTITIONS_DYNAMIC_TOPIC = 4; @@ -75,26 +77,26 @@ public class KafkaEnumeratorTest { private static final boolean INCLUDE_DYNAMIC_TOPIC = true; private static final boolean EXCLUDE_DYNAMIC_TOPIC = false; - @BeforeClass - public static void setup() throws Throwable { + @BeforeAll + static void setup() throws Throwable { KafkaSourceTestEnv.setup(); KafkaSourceTestEnv.setupTopic(TOPIC1, true, true, KafkaSourceTestEnv::getRecordsForTopic); KafkaSourceTestEnv.setupTopic(TOPIC2, true, true, KafkaSourceTestEnv::getRecordsForTopic); } - @AfterClass - public static void tearDown() throws Exception { + @AfterAll + static void tearDown() throws Exception { KafkaSourceTestEnv.tearDown(); } @Test - public void testStartWithDiscoverPartitionsOnce() throws Exception { + void testStartWithDiscoverPartitionsOnce() throws Exception { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. 
enumerator.start(); assertThat(context.getPeriodicCallables()).isEmpty(); @@ -109,13 +111,13 @@ public void testStartWithDiscoverPartitionsOnce() throws Exception { } @Test - public void testStartWithPeriodicPartitionDiscovery() throws Exception { + void testStartWithPeriodicPartitionDiscovery() throws Exception { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. enumerator.start(); assertThat(context.getOneTimeCallables()).isEmpty(); @@ -130,13 +132,13 @@ public void testStartWithPeriodicPartitionDiscovery() throws Exception { } @Test - public void testDiscoverPartitionsTriggersAssignments() throws Throwable { + void testDiscoverPartitionsTriggersAssignments() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. enumerator.start(); @@ -155,13 +157,13 @@ public void testDiscoverPartitionsTriggersAssignments() throws Throwable { } @Test - public void testReaderRegistrationTriggersAssignments() throws Throwable { + void testReaderRegistrationTriggersAssignments() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. enumerator.start(); runOneTimePartitionDiscovery(context); @@ -178,13 +180,13 @@ public void testReaderRegistrationTriggersAssignments() throws Throwable { } @Test - public void testRunWithDiscoverPartitionsOnceToCheckNoMoreSplit() throws Throwable { + void testRunWithDiscoverPartitionsOnceToCheckNoMoreSplit() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. enumerator.start(); assertThat(context.getOneTimeCallables()) @@ -202,13 +204,13 @@ public void testRunWithDiscoverPartitionsOnceToCheckNoMoreSplit() throws Throwab } @Test - public void testRunWithPeriodicPartitionDiscoveryOnceToCheckNoMoreSplit() throws Throwable { + void testRunWithPeriodicPartitionDiscoveryOnceToCheckNoMoreSplit() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY)) { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. 
enumerator.start(); assertThat(context.getOneTimeCallables()).isEmpty(); @@ -226,7 +228,7 @@ public void testRunWithPeriodicPartitionDiscoveryOnceToCheckNoMoreSplit() throws } @Test - public void testRunWithDiscoverPartitionsOnceWithZeroMsToCheckNoMoreSplit() throws Throwable { + void testRunWithDiscoverPartitionsOnceWithZeroMsToCheckNoMoreSplit() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); // Disable periodic partition discovery @@ -249,8 +251,9 @@ public void testRunWithDiscoverPartitionsOnceWithZeroMsToCheckNoMoreSplit() thro } } - @Test(timeout = 30000L) - public void testDiscoverPartitionsPeriodically() throws Throwable { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testDiscoverPartitionsPeriodically() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = @@ -317,7 +320,7 @@ public void testDiscoverPartitionsPeriodically() throws Throwable { } @Test - public void testAddSplitsBack() throws Throwable { + void testAddSplitsBack() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = @@ -342,7 +345,7 @@ public void testAddSplitsBack() throws Throwable { } @Test - public void testWorkWithPreexistingAssignments() throws Throwable { + void testWorkWithPreexistingAssignments() throws Throwable { Set preexistingAssignments; try (MockSplitEnumeratorContext context1 = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); @@ -377,7 +380,7 @@ public void testWorkWithPreexistingAssignments() throws Throwable { } @Test - public void testKafkaClientProperties() throws Exception { + void testKafkaClientProperties() throws Exception { Properties properties = new Properties(); String clientIdPrefix = "test-prefix"; Integer defaultTimeoutMs = 99999; @@ -410,7 +413,7 @@ public void testKafkaClientProperties() throws Exception { } @Test - public void testSnapshotState() throws Throwable { + void testSnapshotState() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, false)) { @@ -463,7 +466,7 @@ public void testSnapshotState() throws Throwable { } @Test - public void testPartitionChangeChecking() throws Throwable { + void testPartitionChangeChecking() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = @@ -483,7 +486,7 @@ public void testPartitionChangeChecking() throws Throwable { final KafkaSourceEnumerator.PartitionChange partitionChange = enumerator.getPartitionChange(fetchedPartitions); - // Since enumerator never met DYNAMIC_TOPIC_NAME-0, it should be mark as a new partition + // Since enumerator never met DYNAMIC_TOPIC_NAME-0, it should be marked as a new partition Set expectedNewPartitions = Collections.singleton(newPartition); // All existing topics are not in the fetchedPartitions, so they should be marked as @@ -500,7 +503,7 @@ public void testPartitionChangeChecking() throws Throwable { } @Test - public void testEnablePartitionDiscoveryByDefault() throws Throwable { + void testEnablePartitionDiscoveryByDefault() throws Throwable { try (MockSplitEnumeratorContext context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS); KafkaSourceEnumerator enumerator = createEnumerator(context, new Properties())) { @@ -514,7 +517,7 @@ public void 
testEnablePartitionDiscoveryByDefault() throws Throwable { } @Test - public void testDisablePartitionDiscovery() throws Throwable { + void testDisablePartitionDiscovery() throws Throwable { Properties props = new Properties(); props.setProperty( KafkaSourceOptions.PARTITION_DISCOVERY_INTERVAL_MS.key(), String.valueOf(0)); @@ -532,7 +535,7 @@ private void startEnumeratorAndRegisterReaders( MockSplitEnumeratorContext context, KafkaSourceEnumerator enumerator) throws Throwable { - // Start the enumerator and it should schedule a one time task to discover and assign + // Start the enumerator, and it should schedule a one time task to discover and assign // partitions. enumerator.start(); @@ -677,7 +680,7 @@ private void verifyAssignments( Set expectedAssignmentsForReader = expectedAssignments.get(reader); assertThat(expectedAssignmentsForReader).isNotNull(); - assertThat(splits.size()).isEqualTo(expectedAssignmentsForReader.size()); + assertThat(splits).hasSameSizeAs(expectedAssignmentsForReader); for (KafkaPartitionSplit split : splits) { assertThat(expectedAssignmentsForReader) .contains(split.getTopicPartition()); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java index 6c172e4a2..fbab252e2 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java @@ -23,7 +23,7 @@ import org.apache.flink.connector.kafka.source.split.KafkaPartitionSplitSerializer; import org.apache.kafka.common.TopicPartition; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Collection; @@ -35,7 +35,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link KafkaSourceEnumStateSerializer}. 
*/ -public class KafkaSourceEnumStateSerializerTest { +class KafkaSourceEnumStateSerializerTest { private static final int NUM_READERS = 10; private static final String TOPIC_PREFIX = "topic-"; @@ -43,7 +43,7 @@ public class KafkaSourceEnumStateSerializerTest { private static final long STARTING_OFFSET = KafkaPartitionSplit.EARLIEST_OFFSET; @Test - public void testEnumStateSerde() throws IOException { + void testEnumStateSerde() throws IOException { final KafkaSourceEnumState state = new KafkaSourceEnumState( constructTopicPartitions(0), @@ -63,7 +63,7 @@ public void testEnumStateSerde() throws IOException { } @Test - public void testBackwardCompatibility() throws IOException { + void testBackwardCompatibility() throws IOException { final Set topicPartitions = constructTopicPartitions(0); final Map> splitAssignments = diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java index 46dd61a6f..12da5b5dc 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java @@ -25,9 +25,9 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.TopicPartition; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.util.Collections; import java.util.HashMap; @@ -36,16 +36,17 @@ import java.util.stream.Collectors; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit tests for {@link OffsetsInitializer}. 
*/ -public class OffsetsInitializerTest { +class OffsetsInitializerTest { private static final String TOPIC = "topic"; private static final String TOPIC2 = "topic2"; private static final String EMPTY_TOPIC3 = "topic3"; private static KafkaSourceEnumerator.PartitionOffsetsRetrieverImpl retriever; - @BeforeClass - public static void setup() throws Throwable { + @BeforeAll + static void setup() throws Throwable { KafkaSourceTestEnv.setup(); KafkaSourceTestEnv.setupTopic(TOPIC, true, true, KafkaSourceTestEnv::getRecordsForTopic); KafkaSourceTestEnv.setupTopic(TOPIC2, false, false, KafkaSourceTestEnv::getRecordsForTopic); @@ -56,14 +57,14 @@ public static void setup() throws Throwable { KafkaSourceTestEnv.getAdminClient(), KafkaSourceTestEnv.GROUP_ID); } - @AfterClass - public static void tearDown() throws Exception { + @AfterAll + static void tearDown() throws Exception { retriever.close(); KafkaSourceTestEnv.tearDown(); } @Test - public void testEarliestOffsetsInitializer() { + void testEarliestOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.earliest(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); @@ -77,7 +78,7 @@ public void testEarliestOffsetsInitializer() { } @Test - public void testLatestOffsetsInitializer() { + void testLatestOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.latest(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); @@ -90,7 +91,7 @@ public void testLatestOffsetsInitializer() { } @Test - public void testCommittedGroupOffsetsInitializer() { + void testCommittedGroupOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.committedOffsets(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); @@ -102,7 +103,7 @@ public void testCommittedGroupOffsetsInitializer() { } @Test - public void testTimestampOffsetsInitializer() { + void testTimestampOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.timestamp(2001); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); @@ -115,7 +116,7 @@ public void testTimestampOffsetsInitializer() { } @Test - public void testTimestampOffsetsInitializerForEmptyPartitions() { + void testTimestampOffsetsInitializerForEmptyPartitions() { OffsetsInitializer initializer = OffsetsInitializer.timestamp(2001); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(EMPTY_TOPIC3); Map expectedOffsets = @@ -127,7 +128,7 @@ public void testTimestampOffsetsInitializerForEmptyPartitions() { } @Test - public void testSpecificOffsetsInitializer() { + void testSpecificOffsetsInitializer() { Map specifiedOffsets = new HashMap<>(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map committedOffsets = @@ -161,10 +162,14 @@ public void testSpecificOffsetsInitializer() { } } - @Test(expected = IllegalStateException.class) - public void testSpecifiedOffsetsInitializerWithoutOffsetResetStrategy() { + @Test + void testSpecifiedOffsetsInitializerWithoutOffsetResetStrategy() { OffsetsInitializer initializer = OffsetsInitializer.offsets(Collections.emptyMap(), OffsetResetStrategy.NONE); - initializer.getPartitionOffsets(KafkaSourceTestEnv.getPartitionsForTopic(TOPIC), retriever); + assertThatThrownBy( + () -> + 
initializer.getPartitionOffsets( + KafkaSourceTestEnv.getPartitionsForTopic(TOPIC), retriever)) + .isInstanceOf(IllegalStateException.class); } } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java index 258c1c0ab..d83e9e09f 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java @@ -23,9 +23,9 @@ import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.Collections; @@ -39,28 +39,28 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit tests for {@link KafkaSubscriber}. */ -public class KafkaSubscriberTest { +class KafkaSubscriberTest { private static final String TOPIC1 = "topic1"; private static final String TOPIC2 = "pattern-topic"; private static final TopicPartition NON_EXISTING_TOPIC = new TopicPartition("removed", 0); private static AdminClient adminClient; - @BeforeClass - public static void setup() throws Throwable { + @BeforeAll + static void setup() throws Throwable { KafkaSourceTestEnv.setup(); KafkaSourceTestEnv.createTestTopic(TOPIC1); KafkaSourceTestEnv.createTestTopic(TOPIC2); adminClient = KafkaSourceTestEnv.getAdminClient(); } - @AfterClass - public static void tearDown() throws Exception { + @AfterAll + static void tearDown() throws Exception { adminClient.close(); KafkaSourceTestEnv.tearDown(); } @Test - public void testTopicListSubscriber() { + void testTopicListSubscriber() { List topics = Arrays.asList(TOPIC1, TOPIC2); KafkaSubscriber subscriber = KafkaSubscriber.getTopicListSubscriber(Arrays.asList(TOPIC1, TOPIC2)); @@ -74,7 +74,7 @@ public void testTopicListSubscriber() { } @Test - public void testNonExistingTopic() { + void testNonExistingTopic() { final KafkaSubscriber subscriber = KafkaSubscriber.getTopicListSubscriber( Collections.singletonList(NON_EXISTING_TOPIC.topic())); @@ -85,7 +85,7 @@ public void testNonExistingTopic() { } @Test - public void testTopicPatternSubscriber() { + void testTopicPatternSubscriber() { KafkaSubscriber subscriber = KafkaSubscriber.getTopicPatternSubscriber(Pattern.compile("pattern.*")); final Set subscribedPartitions = @@ -99,7 +99,7 @@ public void testTopicPatternSubscriber() { } @Test - public void testPartitionSetSubscriber() { + void testPartitionSetSubscriber() { List topics = Arrays.asList(TOPIC1, TOPIC2); Set partitions = new HashSet<>(KafkaSourceTestEnv.getPartitionsForTopics(topics)); @@ -114,7 +114,7 @@ public void testPartitionSetSubscriber() { } @Test - public void testNonExistingPartition() { + void testNonExistingPartition() { TopicPartition nonExistingPartition = new TopicPartition(TOPIC1, Integer.MAX_VALUE); final KafkaSubscriber subscriber = KafkaSubscriber.getPartitionSetSubscriber( diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java 
b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java index d9dd1d3b0..a14576403 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java @@ -24,7 +24,7 @@ import org.apache.flink.runtime.metrics.groups.InternalSourceReaderMetricGroup; import org.apache.kafka.common.TopicPartition; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Optional; @@ -42,7 +42,7 @@ public class KafkaSourceReaderMetricsTest { private static final TopicPartition BAR_1 = new TopicPartition("bar", 1); @Test - public void testCurrentOffsetTracking() { + void testCurrentOffsetTracking() { MetricListener metricListener = new MetricListener(); final KafkaSourceReaderMetrics kafkaSourceReaderMetrics = @@ -66,7 +66,7 @@ public void testCurrentOffsetTracking() { } @Test - public void testCommitOffsetTracking() { + void testCommitOffsetTracking() { MetricListener metricListener = new MetricListener(); final KafkaSourceReaderMetrics kafkaSourceReaderMetrics = @@ -101,7 +101,7 @@ public void testCommitOffsetTracking() { } @Test - public void testNonTrackingTopicPartition() { + void testNonTrackingTopicPartition() { MetricListener metricListener = new MetricListener(); final KafkaSourceReaderMetrics kafkaSourceReaderMetrics = new KafkaSourceReaderMetrics( @@ -113,7 +113,7 @@ public void testNonTrackingTopicPartition() { } @Test - public void testFailedCommit() { + void testFailedCommit() { MetricListener metricListener = new MetricListener(); final KafkaSourceReaderMetrics kafkaSourceReaderMetrics = new KafkaSourceReaderMetrics( diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java index b592a6917..9103fcddf 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java @@ -100,14 +100,14 @@ public static void tearDown() throws Exception { } @Test - public void testHandleSplitChangesAndFetch() throws Exception { + void testHandleSplitChangesAndFetch() throws Exception { KafkaPartitionSplitReader reader = createReader(); assignSplitsAndFetchUntilFinish(reader, 0); assignSplitsAndFetchUntilFinish(reader, 1); } @Test - public void testWakeUp() throws Exception { + void testWakeUp() throws Exception { KafkaPartitionSplitReader reader = createReader(); TopicPartition nonExistingTopicPartition = new TopicPartition("NotExist", 0); assignSplits( @@ -136,7 +136,7 @@ public void testWakeUp() throws Exception { } @Test - public void testWakeupThenAssign() throws IOException { + void testWakeupThenAssign() throws IOException { KafkaPartitionSplitReader reader = createReader(); // Assign splits with records assignSplits(reader, splitsByOwners.get(0)); @@ -154,7 +154,7 @@ public void testWakeupThenAssign() throws IOException { } @Test - public void testNumBytesInCounter() throws Exception { + void testNumBytesInCounter() throws Exception { final OperatorMetricGroup operatorMetricGroup = UnregisteredMetricGroups.createUnregisteredOperatorMetricGroup(); final Counter 
numBytesInCounter = @@ -236,7 +236,7 @@ public void testPendingRecordsGauge(String topicSuffix) throws Throwable { } @Test - public void testAssignEmptySplit() throws Exception { + void testAssignEmptySplit() throws Exception { KafkaPartitionSplitReader reader = createReader(); final KafkaPartitionSplit normalSplit = new KafkaPartitionSplit( @@ -276,7 +276,7 @@ public void testAssignEmptySplit() throws Exception { } @Test - public void testUsingCommittedOffsetsWithNoneOffsetResetStrategy() { + void testUsingCommittedOffsetsWithNoneOffsetResetStrategy() { final Properties props = new Properties(); props.setProperty( ConsumerConfig.GROUP_ID_CONFIG, "using-committed-offset-with-none-offset-reset"); @@ -321,7 +321,7 @@ public void testUsingCommittedOffsetsWithEarliestOrLatestOffsetResetStrategy( } @Test - public void testConsumerClientRackSupplier() { + void testConsumerClientRackSupplier() { String rackId = "use1-az1"; Properties properties = new Properties(); KafkaPartitionSplitReader reader = diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java index f5aa7f5fd..47aad9030 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java @@ -461,7 +461,7 @@ void testAssigningEmptySplitOnly() throws Exception { } @Test - public void testSupportsPausingOrResumingSplits() throws Exception { + void testSupportsPausingOrResumingSplits() throws Exception { final Set finishedSplits = new HashSet<>(); try (final KafkaSourceReader reader = @@ -507,7 +507,7 @@ public void testSupportsPausingOrResumingSplits() throws Exception { } @Test - public void testThatReaderDoesNotCallRackIdSupplierOnInit() throws Exception { + void testThatReaderDoesNotCallRackIdSupplierOnInit() throws Exception { SerializableSupplier rackIdSupplier = Mockito.mock(SerializableSupplier.class); try (KafkaSourceReader reader = @@ -525,7 +525,7 @@ public void testThatReaderDoesNotCallRackIdSupplierOnInit() throws Exception { } @Test - public void testThatReaderDoesCallRackIdSupplierOnSplitAssignment() throws Exception { + void testThatReaderDoesCallRackIdSupplierOnSplitAssignment() throws Exception { SerializableSupplier rackIdSupplier = Mockito.mock(SerializableSupplier.class); Mockito.when(rackIdSupplier.get()).thenReturn("use1-az1"); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java index b0ca63161..af53b0c3c 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java @@ -32,8 +32,8 @@ import org.apache.kafka.common.Configurable; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import 
java.util.Collections; @@ -44,7 +44,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for KafkaRecordDeserializationSchema. */ -public class KafkaRecordDeserializationSchemaTest { +class KafkaRecordDeserializationSchemaTest { private static final ObjectMapper OBJECT_MAPPER = JacksonMapperFactory.createObjectMapper(); @@ -52,15 +52,15 @@ public class KafkaRecordDeserializationSchemaTest { private static Map configuration; private static boolean isKeyDeserializer; - @Before - public void setUp() { + @BeforeEach + void setUp() { configurableConfiguration = new HashMap<>(1); configuration = new HashMap<>(1); isKeyDeserializer = false; } @Test - public void testKafkaDeserializationSchemaWrapper() throws Exception { + void testKafkaDeserializationSchemaWrapper() throws Exception { final ConsumerRecord consumerRecord = getConsumerRecord(); KafkaRecordDeserializationSchema schema = KafkaRecordDeserializationSchema.of(new JSONKeyValueDeserializationSchema(true)); @@ -79,7 +79,7 @@ public void testKafkaDeserializationSchemaWrapper() throws Exception { } @Test - public void testKafkaValueDeserializationSchemaWrapper() throws Exception { + void testKafkaValueDeserializationSchemaWrapper() throws Exception { final ConsumerRecord consumerRecord = getConsumerRecord(); KafkaRecordDeserializationSchema< org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node @@ -105,7 +105,7 @@ public void testKafkaValueDeserializationSchemaWrapper() throws Exception { } @Test - public void testKafkaValueDeserializerWrapper() throws Exception { + void testKafkaValueDeserializerWrapper() throws Exception { final String topic = "Topic"; byte[] value = new StringSerializer().serialize(topic, "world"); final ConsumerRecord consumerRecord = @@ -122,7 +122,7 @@ public void testKafkaValueDeserializerWrapper() throws Exception { } @Test - public void testKafkaValueDeserializerWrapperWithoutConfigurable() throws Exception { + void testKafkaValueDeserializerWrapperWithoutConfigurable() throws Exception { final Map config = Collections.singletonMap("simpleKey", "simpleValue"); KafkaRecordDeserializationSchema schema = KafkaRecordDeserializationSchema.valueOnly(SimpleStringSerializer.class, config); @@ -133,7 +133,7 @@ public void testKafkaValueDeserializerWrapperWithoutConfigurable() throws Except } @Test - public void testKafkaValueDeserializerWrapperWithConfigurable() throws Exception { + void testKafkaValueDeserializerWrapperWithConfigurable() throws Exception { final Map config = Collections.singletonMap("configKey", "configValue"); KafkaRecordDeserializationSchema schema = KafkaRecordDeserializationSchema.valueOnly( diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/split/KafkaPartitionSplitSerializerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/split/KafkaPartitionSplitSerializerTest.java index db7647242..f73392310 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/split/KafkaPartitionSplitSerializerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/split/KafkaPartitionSplitSerializerTest.java @@ -30,7 +30,7 @@ public class KafkaPartitionSplitSerializerTest { @Test - public void testSerializer() throws IOException { + void testSerializer() throws IOException { String topic = "topic"; Long offsetZero = 0L; Long normalOffset = 1L; diff --git 
a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java index 8db2e596c..3693ec28d 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java @@ -20,12 +20,12 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; /** Tests for the {@link FlinkFixedPartitioner}. */ -public class FlinkFixedPartitionerTest { +class FlinkFixedPartitionerTest { /** * Test for when there are more sinks than partitions. @@ -39,7 +39,7 @@ public class FlinkFixedPartitionerTest { * */ @Test - public void testMoreFlinkThanBrokers() { + void testMoreFlinkThanBrokers() { FlinkFixedPartitioner part = new FlinkFixedPartitioner<>(); int[] partitions = new int[] {0}; @@ -73,7 +73,7 @@ public void testMoreFlinkThanBrokers() { * */ @Test - public void testFewerPartitions() { + void testFewerPartitions() { FlinkFixedPartitioner part = new FlinkFixedPartitioner<>(); int[] partitions = new int[] {0, 1, 2, 3, 4}; @@ -93,7 +93,7 @@ public void testFewerPartitions() { * 3 ----------/ */ @Test - public void testMixedCase() { + void testMixedCase() { FlinkFixedPartitioner part = new FlinkFixedPartitioner<>(); int[] partitions = new int[] {0, 1}; diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java index 296545cad..62032f014 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java @@ -36,12 +36,13 @@ import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicsDescriptor; import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; import org.apache.flink.streaming.util.OperatorSnapshotUtil; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; import org.apache.flink.util.SerializedValue; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -66,7 +67,7 @@ *

For regenerating the binary snapshot files run {@link #writeSnapshot()} on the corresponding * Flink release-* branch. */ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class FlinkKafkaConsumerBaseMigrationTest { /** @@ -90,7 +91,7 @@ public class FlinkKafkaConsumerBaseMigrationTest { private final FlinkVersion testMigrateVersion; - @Parameterized.Parameters(name = "Migration Savepoint: {0}") + @Parameters(name = "Migration Savepoint: {0}") public static Collection parameters() { return FlinkVersion.rangeOf(FlinkVersion.v1_8, FlinkVersion.v1_16); } @@ -100,9 +101,9 @@ public FlinkKafkaConsumerBaseMigrationTest(FlinkVersion testMigrateVersion) { } /** Manually run this to write binary snapshot data. */ - @Ignore + @Disabled @Test - public void writeSnapshot() throws Exception { + void writeSnapshot() throws Exception { writeSnapshot( "src/test/resources/kafka-consumer-migration-test-flink" + flinkGenerateSavepointVersion @@ -194,7 +195,7 @@ public void collect(String element) {} /** Test restoring from an legacy empty state, when no partitions could be found for topics. */ @Test - public void testRestoreFromEmptyStateNoPartitions() throws Exception { + void testRestoreFromEmptyStateNoPartitions() throws Exception { final DummyFlinkKafkaConsumer consumerFunction = new DummyFlinkKafkaConsumer<>( Collections.singletonList("dummy-topic"), @@ -235,7 +236,7 @@ public void testRestoreFromEmptyStateNoPartitions() throws Exception { * could be found for topics. */ @Test - public void testRestoreFromEmptyStateWithPartitions() throws Exception { + void testRestoreFromEmptyStateWithPartitions() throws Exception { final List partitions = new ArrayList<>(PARTITION_STATE.keySet()); final DummyFlinkKafkaConsumer consumerFunction = @@ -295,7 +296,7 @@ public void testRestoreFromEmptyStateWithPartitions() throws Exception { * partitions could be found for topics. 
*/ @Test - public void testRestore() throws Exception { + void testRestore() throws Exception { final List partitions = new ArrayList<>(PARTITION_STATE.keySet()); final DummyFlinkKafkaConsumer consumerFunction = diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java index a2438165e..28682b405 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java @@ -33,6 +33,7 @@ import org.apache.flink.api.java.typeutils.runtime.TupleSerializer; import org.apache.flink.configuration.Configuration; import org.apache.flink.core.testutils.CheckedThread; +import org.apache.flink.core.testutils.FlinkAssertions; import org.apache.flink.core.testutils.OneShotLatch; import org.apache.flink.metrics.MetricGroup; import org.apache.flink.metrics.groups.UnregisteredMetricsGroup; @@ -60,16 +61,16 @@ import org.apache.flink.streaming.util.MockDeserializationSchema; import org.apache.flink.streaming.util.MockStreamingRuntimeContext; import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema; -import org.apache.flink.util.ExceptionUtils; import org.apache.flink.util.FlinkException; import org.apache.flink.util.InstantiationUtil; import org.apache.flink.util.Preconditions; import org.apache.flink.util.SerializedValue; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; import org.apache.flink.util.function.SupplierWithException; import org.apache.flink.util.function.ThrowingRunnable; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import javax.annotation.Nonnull; @@ -102,26 +103,26 @@ import static org.mockito.Mockito.mock; /** Tests for the {@link FlinkKafkaConsumerBase}. */ -public class FlinkKafkaConsumerBaseTest extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +class FlinkKafkaConsumerBaseTest { private static final int maxParallelism = Short.MAX_VALUE / 2; /** Tests that not both types of timestamp extractors / watermark generators can be used. */ @Test @SuppressWarnings("unchecked") - public void testEitherWatermarkExtractor() { + void testEitherWatermarkExtractor() { + final FlinkKafkaConsumerBase consumer = new DummyFlinkKafkaConsumer(); assertThatThrownBy( () -> - new DummyFlinkKafkaConsumer() - .assignTimestampsAndWatermarks( - (AssignerWithPeriodicWatermarks) null)) + consumer.assignTimestampsAndWatermarks( + (AssignerWithPeriodicWatermarks) null)) .isInstanceOf(NullPointerException.class); assertThatThrownBy( () -> - new DummyFlinkKafkaConsumer() - .assignTimestampsAndWatermarks( - (AssignerWithPunctuatedWatermarks) null)) + consumer.assignTimestampsAndWatermarks( + (AssignerWithPunctuatedWatermarks) null)) .isInstanceOf(NullPointerException.class); final AssignerWithPeriodicWatermarks periodicAssigner = @@ -142,8 +143,7 @@ public void testEitherWatermarkExtractor() { /** Tests that no checkpoints happen when the fetcher is not running. 
*/ @Test - public void ignoreCheckpointWhenNotRunning() throws Exception { - @SuppressWarnings("unchecked") + void ignoreCheckpointWhenNotRunning() throws Exception { final MockFetcher fetcher = new MockFetcher<>(); final FlinkKafkaConsumerBase consumer = new DummyFlinkKafkaConsumer<>( @@ -162,7 +162,7 @@ public void ignoreCheckpointWhenNotRunning() throws Exception { // acknowledgement of the checkpoint should also not result in any offset commits consumer.notifyCheckpointComplete(1L); assertThat(fetcher.getAndClearLastCommittedOffsets()).isNull(); - assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getCommitCount()).isZero(); } /** @@ -170,8 +170,7 @@ public void ignoreCheckpointWhenNotRunning() throws Exception { * correctly contains the restored state instead. */ @Test - public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception { - @SuppressWarnings("unchecked") + void checkRestoredCheckpointWhenFetcherNotReady() throws Exception { final FlinkKafkaConsumerBase consumer = new DummyFlinkKafkaConsumer<>(); final TestingListState> restoredListState = @@ -203,8 +202,7 @@ public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception { } @Test - public void testConfigureOnCheckpointsCommitMode() throws Exception { - @SuppressWarnings("unchecked") + void testConfigureOnCheckpointsCommitMode() throws Exception { // auto-commit enabled; this should be ignored in this case final DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer<>(true); @@ -216,8 +214,7 @@ public void testConfigureOnCheckpointsCommitMode() throws Exception { } @Test - public void testConfigureAutoCommitMode() throws Exception { - @SuppressWarnings("unchecked") + void testConfigureAutoCommitMode() throws Exception { final DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer<>(true); setupConsumer(consumer); @@ -226,8 +223,7 @@ public void testConfigureAutoCommitMode() throws Exception { } @Test - public void testConfigureDisableOffsetCommitWithCheckpointing() throws Exception { - @SuppressWarnings("unchecked") + void testConfigureDisableOffsetCommitWithCheckpointing() throws Exception { // auto-commit enabled; this should be ignored in this case final DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer<>(true); consumer.setCommitOffsetsOnCheckpoints( @@ -241,8 +237,7 @@ public void testConfigureDisableOffsetCommitWithCheckpointing() throws Exception } @Test - public void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Exception { - @SuppressWarnings("unchecked") + void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Exception { final DummyFlinkKafkaConsumer consumer = new DummyFlinkKafkaConsumer<>(false); setupConsumer(consumer); @@ -255,8 +250,8 @@ public void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Except * (filterRestoredPartitionsWithDiscovered is active) */ @Test - public void testSetFilterRestoredParitionsNoChange() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testSetFilterRestoredPartitionsNoChange() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), @@ -268,8 +263,8 @@ public void testSetFilterRestoredParitionsNoChange() throws Exception { * in restored partitions. 
(filterRestoredPartitionsWithDiscovered is active) */ @Test - public void testSetFilterRestoredParitionsWithRemovedTopic() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testSetFilterRestoredPartitionsWithRemovedTopic() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1"}), Arrays.asList(new String[] {"kafka_topic_1"}), @@ -281,8 +276,8 @@ public void testSetFilterRestoredParitionsWithRemovedTopic() throws Exception { * (filterRestoredPartitionsWithDiscovered is active) */ @Test - public void testSetFilterRestoredParitionsWithAddedTopic() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testSetFilterRestoredPartitionsWithAddedTopic() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), @@ -294,8 +289,8 @@ public void testSetFilterRestoredParitionsWithAddedTopic() throws Exception { * (filterRestoredPartitionsWithDiscovered is disabled) */ @Test - public void testDisableFilterRestoredParitionsNoChange() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testDisableFilterRestoredPartitionsNoChange() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), @@ -307,8 +302,8 @@ public void testDisableFilterRestoredParitionsNoChange() throws Exception { * still in restored partitions. (filterRestoredPartitionsWithDiscovered is disabled) */ @Test - public void testDisableFilterRestoredParitionsWithRemovedTopic() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testDisableFilterRestoredPartitionsWithRemovedTopic() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), @@ -320,15 +315,15 @@ public void testDisableFilterRestoredParitionsWithRemovedTopic() throws Exceptio * (filterRestoredPartitionsWithDiscovered is disabled) */ @Test - public void testDisableFilterRestoredParitionsWithAddedTopic() throws Exception { - checkFilterRestoredPartitionsWithDisovered( + void testDisableFilterRestoredPartitionsWithAddedTopic() throws Exception { + checkFilterRestoredPartitionsWithDiscovered( Arrays.asList(new String[] {"kafka_topic_1"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), Arrays.asList(new String[] {"kafka_topic_1", "kafka_topic_2"}), true); } - private void checkFilterRestoredPartitionsWithDisovered( + private void checkFilterRestoredPartitionsWithDiscovered( List restoredKafkaTopics, List initKafkaTopics, List expectedSubscribedPartitions, @@ -374,7 +369,7 @@ private void checkFilterRestoredPartitionsWithDisovered( @Test @SuppressWarnings("unchecked") - public void testSnapshotStateWithCommitOnCheckpointsEnabled() throws Exception { + void testSnapshotStateWithCommitOnCheckpointsEnabled() throws Exception { // -------------------------------------------------------------------- // prepare fake states @@ -431,7 +426,7 @@ public void go() throws Exception { assertThat(snapshot1).isEqualTo(state1); 
assertThat(consumer.getPendingOffsetsToCommit()).hasSize(1); - assertThat(consumer.getPendingOffsetsToCommit().get(138L)).isEqualTo(state1); + assertThat(consumer.getPendingOffsetsToCommit()).containsEntry(138L, state1); // checkpoint 2 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140)); @@ -446,7 +441,7 @@ public void go() throws Exception { assertThat(snapshot2).isEqualTo(state2); assertThat(consumer.getPendingOffsetsToCommit()).hasSize(2); - assertThat(consumer.getPendingOffsetsToCommit().get(140L)).isEqualTo(state2); + assertThat(consumer.getPendingOffsetsToCommit()).containsEntry(140L, state2); // ack checkpoint 1 consumer.notifyCheckpointComplete(138L); @@ -468,7 +463,7 @@ public void go() throws Exception { assertThat(snapshot3).isEqualTo(state3); assertThat(consumer.getPendingOffsetsToCommit()).hasSize(2); - assertThat(consumer.getPendingOffsetsToCommit().get(141L)).isEqualTo(state3); + assertThat(consumer.getPendingOffsetsToCommit()).containsEntry(141L, state3); // ack checkpoint 3, subsumes number 2 consumer.notifyCheckpointComplete(141L); @@ -487,7 +482,7 @@ public void go() throws Exception { @Test @SuppressWarnings("unchecked") - public void testSnapshotStateWithCommitOnCheckpointsDisabled() throws Exception { + void testSnapshotStateWithCommitOnCheckpointsDisabled() throws Exception { // -------------------------------------------------------------------- // prepare fake states // -------------------------------------------------------------------- @@ -543,8 +538,7 @@ public void go() throws Exception { } assertThat(snapshot1).isEqualTo(state1); - assertThat(consumer.getPendingOffsetsToCommit().size()) - .isEqualTo(0); // pending offsets to commit should not be updated + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); // pending offsets to commit should not be updated // checkpoint 2 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140)); @@ -558,12 +552,11 @@ public void go() throws Exception { } assertThat(snapshot2).isEqualTo(state2); - assertThat(consumer.getPendingOffsetsToCommit().size()) - .isEqualTo(0); // pending offsets to commit should not be updated + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); // pending offsets to commit should not be updated // ack checkpoint 1 consumer.notifyCheckpointComplete(138L); - assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getCommitCount()).isZero(); assertThat(fetcher.getAndClearLastCommittedOffsets()) .isNull(); // no offsets should be committed @@ -579,17 +572,16 @@ public void go() throws Exception { } assertThat(snapshot3).isEqualTo(state3); - assertThat(consumer.getPendingOffsetsToCommit().size()) - .isEqualTo(0); // pending offsets to commit should not be updated + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); // pending offsets to commit should not be updated // ack checkpoint 3, subsumes number 2 consumer.notifyCheckpointComplete(141L); - assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getCommitCount()).isZero(); assertThat(fetcher.getAndClearLastCommittedOffsets()) .isNull(); // no offsets should be committed consumer.notifyCheckpointComplete(666); // invalid checkpoint - assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getCommitCount()).isZero(); assertThat(fetcher.getAndClearLastCommittedOffsets()) .isNull(); // no offsets should be committed @@ -598,7 +590,7 @@ public void go() throws Exception { } @Test - public void 
testClosePartitionDiscovererWhenOpenThrowException() throws Exception { + void testClosePartitionDiscovererWhenOpenThrowException() throws Exception { final RuntimeException failureCause = new RuntimeException(new FlinkException("Test partition discoverer exception")); final FailingPartitionDiscoverer failingPartitionDiscoverer = @@ -614,7 +606,7 @@ public void testClosePartitionDiscovererWhenOpenThrowException() throws Exceptio } @Test - public void testClosePartitionDiscovererWhenCreateKafkaFetcherFails() throws Exception { + void testClosePartitionDiscovererWhenCreateKafkaFetcherFails() throws Exception { final FlinkException failureCause = new FlinkException("Create Kafka fetcher failure."); final DummyPartitionDiscoverer testPartitionDiscoverer = new DummyPartitionDiscoverer(); @@ -633,7 +625,7 @@ public void testClosePartitionDiscovererWhenCreateKafkaFetcherFails() throws Exc } @Test - public void testClosePartitionDiscovererWhenKafkaFetcherFails() throws Exception { + void testClosePartitionDiscovererWhenKafkaFetcherFails() throws Exception { final FlinkException failureCause = new FlinkException("Run Kafka fetcher failure."); // in this scenario, the partition discoverer will be concurrently accessed; @@ -658,23 +650,15 @@ public void testClosePartitionDiscovererWhenKafkaFetcherFails() throws Exception private void testFailingConsumerLifecycle( FlinkKafkaConsumerBase testKafkaConsumer, @Nonnull Exception expectedException) throws Exception { - try { + assertThatThrownBy(() -> { setupConsumer(testKafkaConsumer); testKafkaConsumer.run(new TestSourceContext<>()); - - fail( - "Exception should have been thrown from open / run method of FlinkKafkaConsumerBase."); - } catch (Exception e) { - assertThat( - ExceptionUtils.findThrowable( - e, throwable -> throwable.equals(expectedException))) - .isPresent(); - } + }).satisfies(FlinkAssertions.anyCauseMatches(expectedException.getClass())); testKafkaConsumer.close(); } @Test - public void testClosePartitionDiscovererWithCancellation() throws Exception { + void testClosePartitionDiscovererWithCancellation() throws Exception { final DummyPartitionDiscoverer testPartitionDiscoverer = new DummyPartitionDiscoverer(); final TestingFlinkKafkaConsumer consumer = @@ -707,7 +691,7 @@ private void setupConsumer(FlinkKafkaConsumerBase consumer) throws Excep * that the two methods create compatible serializers. 
*/ @Test - public void testExplicitStateSerializerCompatibility() throws Exception { + void testExplicitStateSerializerCompatibility() throws Exception { ExecutionConfig executionConfig = new ExecutionConfig(); Tuple2 tuple = @@ -733,12 +717,12 @@ public void testExplicitStateSerializerCompatibility() throws Exception { } @Test - public void testScaleUp() throws Exception { + void testScaleUp() throws Exception { testRescaling(5, 2, 8, 30); } @Test - public void testScaleDown() throws Exception { + void testScaleDown() throws Exception { testRescaling(5, 10, 2, 100); } @@ -883,7 +867,7 @@ private void testRescaling( } @Test - public void testOpen() throws Exception { + void testOpen() throws Exception { MockDeserializationSchema deserializationSchema = new MockDeserializationSchema<>(); AbstractStreamOperatorTestHarness testHarness = @@ -898,7 +882,7 @@ public void testOpen() throws Exception { } @Test - public void testOpenWithRestoreState() throws Exception { + void testOpenWithRestoreState() throws Exception { MockDeserializationSchema deserializationSchema = new MockDeserializationSchema<>(); final FlinkKafkaConsumerBase consumer = new DummyFlinkKafkaConsumer<>( @@ -945,7 +929,7 @@ public FailingPartitionDiscoverer(RuntimeException failureCause) { } @Override - protected void initializeConnections() throws Exception { + protected void initializeConnections() { closed = false; } @@ -953,7 +937,7 @@ protected void initializeConnections() throws Exception { protected void wakeupConnections() {} @Override - protected void closeConnections() throws Exception { + protected void closeConnections() { closed = true; } @@ -1087,8 +1071,7 @@ public void cancel() { @Override protected void doCommitInternalOffsetsToKafka( - Map offsets, @Nonnull KafkaCommitCallback commitCallback) - throws Exception {} + Map offsets, @Nonnull KafkaCommitCallback commitCallback) {} @Override protected KPH createKafkaPartitionHandle(KafkaTopicPartition partition) { @@ -1104,11 +1087,10 @@ protected KPH createKafkaPartitionHandle(KafkaTopicPartition partition) { private static class DummyFlinkKafkaConsumer extends FlinkKafkaConsumerBase { private static final long serialVersionUID = 1L; - private SupplierWithException, Exception> testFetcherSupplier; - private AbstractPartitionDiscoverer testPartitionDiscoverer; - private boolean isAutoCommitEnabled; + private final SupplierWithException, Exception> testFetcherSupplier; + private final AbstractPartitionDiscoverer testPartitionDiscoverer; + private final boolean isAutoCommitEnabled; - @SuppressWarnings("unchecked") DummyFlinkKafkaConsumer() { this(false); } @@ -1149,7 +1131,6 @@ private static class DummyFlinkKafkaConsumer extends FlinkKafkaConsumerBase) mock(KeyedDeserializationSchema.class)); } - @SuppressWarnings("unchecked") DummyFlinkKafkaConsumer( SupplierWithException, Exception> abstractFetcherSupplier, AbstractPartitionDiscoverer abstractPartitionDiscoverer, @@ -1161,7 +1142,6 @@ private static class DummyFlinkKafkaConsumer extends FlinkKafkaConsumerBase testFetcher, AbstractPartitionDiscoverer testPartitionDiscoverer, @@ -1173,7 +1153,6 @@ private static class DummyFlinkKafkaConsumer extends FlinkKafkaConsumerBase testFetcher, AbstractPartitionDiscoverer testPartitionDiscoverer, @@ -1201,7 +1180,6 @@ private static class DummyFlinkKafkaConsumer extends FlinkKafkaConsumerBase) mock(KeyedDeserializationSchema.class)); } - @SuppressWarnings("unchecked") DummyFlinkKafkaConsumer( SupplierWithException, Exception> testFetcherSupplier, AbstractPartitionDiscoverer 
testPartitionDiscoverer, @@ -1321,12 +1299,12 @@ public void clear() { } @Override - public Iterable get() throws Exception { + public Iterable get() { return list; } @Override - public void add(T value) throws Exception { + public void add(T value) { Preconditions.checkNotNull(value, "You cannot add null to a ListState."); list.add(value); } @@ -1340,14 +1318,13 @@ boolean isClearCalled() { } @Override - public void update(List values) throws Exception { + public void update(List values) { clear(); - addAll(values); } @Override - public void addAll(List values) throws Exception { + public void addAll(List values) { if (values != null) { values.forEach( v -> Preconditions.checkNotNull(v, "You cannot add null to a ListState.")); @@ -1357,7 +1334,6 @@ public void addAll(List values) throws Exception { } } - @SuppressWarnings("unchecked") private static void setupConsumer( FlinkKafkaConsumerBase consumer, boolean isRestored, @@ -1406,8 +1382,7 @@ private MockFetcher(HashMap... stateSnapshotsToReturn @Override protected void doCommitInternalOffsetsToKafka( - Map offsets, @Nonnull KafkaCommitCallback commitCallback) - throws Exception { + Map offsets, @Nonnull KafkaCommitCallback commitCallback) { this.lastCommittedOffsets = offsets; this.commitCount++; commitCallback.onSuccess(); @@ -1460,20 +1435,17 @@ private MockOperatorStateStore(ListState restoredUnionListState) { @Override @SuppressWarnings("unchecked") - public ListState getUnionListState(ListStateDescriptor stateDescriptor) - throws Exception { + public ListState getUnionListState(ListStateDescriptor stateDescriptor) { return (ListState) mockRestoredUnionListState; } @Override - public BroadcastState getBroadcastState( - MapStateDescriptor stateDescriptor) throws Exception { + public BroadcastState getBroadcastState(MapStateDescriptor stateDescriptor) { throw new UnsupportedOperationException(); } @Override - public ListState getListState(ListStateDescriptor stateDescriptor) - throws Exception { + public ListState getListState(ListStateDescriptor stateDescriptor) { throw new UnsupportedOperationException(); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerITCase.java index 90c773730..74069be27 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerITCase.java @@ -34,13 +34,13 @@ import org.apache.flink.test.util.MiniClusterWithClientResource; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.junit.ClassRule; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.TestInstance.Lifecycle; import org.junit.jupiter.api.io.TempDir; +import org.testcontainers.junit.jupiter.Container; import java.nio.file.Path; import java.util.Properties; @@ -48,10 +48,10 @@ /** ITCase tests class for {@link FlinkKafkaConsumer}. 
*/ @TestInstance(Lifecycle.PER_CLASS) -public class FlinkKafkaConsumerITCase { +class FlinkKafkaConsumerITCase { private static final String TOPIC1 = "FlinkKafkaConsumerITCase_topic1"; - @ClassRule + @Container public static final MiniClusterWithClientResource MINI_CLUSTER = new MiniClusterWithClientResource( new MiniClusterResourceConfiguration.Builder() @@ -71,7 +71,7 @@ public void tearDown() throws Exception { } @Test - public void testStopWithSavepoint(@TempDir Path savepointsDir) throws Exception { + void testStopWithSavepoint(@TempDir Path savepointsDir) throws Exception { Configuration config = new Configuration() .set( diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java index 15729a8c8..feb1fb876 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java @@ -27,27 +27,30 @@ import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.time.Duration; import java.util.Collections; import java.util.Iterator; import java.util.Properties; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for our own {@link FlinkKafkaInternalProducer}. 
*/ @SuppressWarnings("serial") -public class FlinkKafkaInternalProducerITCase extends KafkaTestBase { +class FlinkKafkaInternalProducerITCase extends KafkaTestBase { protected String transactionalId; protected Properties extraProperties; private volatile Exception exceptionInCallback; - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { LOG.info("-------------------------------------------------------------------------"); LOG.info(" Starting KafkaTestBase "); LOG.info("-------------------------------------------------------------------------"); @@ -63,8 +66,8 @@ public static void prepare() throws Exception { .setKafkaServerProperties(serverProperties)); } - @Before - public void before() { + @BeforeEach + void before() { transactionalId = UUID.randomUUID().toString(); extraProperties = new Properties(); extraProperties.putAll(standardProps); @@ -80,8 +83,9 @@ public void before() { extraProperties.put("isolation.level", "read_committed"); } - @Test(timeout = 60000L) - public void testHappyPath() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testHappyPath() { String topicName = "flink-kafka-producer-happy-path"; Producer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); @@ -101,8 +105,9 @@ public void testHappyPath() throws Exception { deleteTestTopic(topicName); } - @Test(timeout = 30000L) - public void testResumeTransaction() throws Exception { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testResumeTransaction() { String topicName = "flink-kafka-producer-resume-transaction"; FlinkKafkaInternalProducer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); @@ -130,7 +135,7 @@ public void testResumeTransaction() throws Exception { assertRecord(topicName, "42", "42"); // this shouldn't throw - in case of network split, old producer might attempt to commit - // it's transaction + // its transaction kafkaProducer.commitTransaction(); // this shouldn't fail also, for same reason as above @@ -147,62 +152,77 @@ public void testResumeTransaction() throws Exception { deleteTestTopic(topicName); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testPartitionsForAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testPartitionsForAfterClosed() { FlinkKafkaInternalProducer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); kafkaProducer.close(Duration.ofSeconds(5)); - kafkaProducer.partitionsFor("Topic"); + assertThatThrownBy(() -> kafkaProducer.partitionsFor("Topic")) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testInitTransactionsAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testInitTransactionsAfterClosed() { FlinkKafkaInternalProducer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); kafkaProducer.close(Duration.ofSeconds(5)); - kafkaProducer.initTransactions(); + assertThatThrownBy(kafkaProducer::initTransactions) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testBeginTransactionAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testBeginTransactionAfterClosed() { FlinkKafkaInternalProducer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); kafkaProducer.initTransactions(); 
kafkaProducer.close(Duration.ofSeconds(5)); - kafkaProducer.beginTransaction(); + assertThatThrownBy(kafkaProducer::beginTransaction) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testCommitTransactionAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testCommitTransactionAfterClosed() { String topicName = "testCommitTransactionAfterClosed"; FlinkKafkaInternalProducer kafkaProducer = getClosedProducer(topicName); - kafkaProducer.commitTransaction(); + assertThatThrownBy(kafkaProducer::commitTransaction) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testResumeTransactionAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testResumeTransactionAfterClosed() { String topicName = "testAbortTransactionAfterClosed"; FlinkKafkaInternalProducer kafkaProducer = getClosedProducer(topicName); - kafkaProducer.resumeTransaction(0L, (short) 1); + assertThatThrownBy(() -> kafkaProducer.resumeTransaction(0L, (short) 1)) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testAbortTransactionAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testAbortTransactionAfterClosed() { String topicName = "testAbortTransactionAfterClosed"; FlinkKafkaInternalProducer kafkaProducer = getClosedProducer(topicName); kafkaProducer.abortTransaction(); - kafkaProducer.resumeTransaction(0L, (short) 1); + assertThatThrownBy(() -> kafkaProducer.resumeTransaction(0L, (short) 1)) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L, expected = IllegalStateException.class) - public void testFlushAfterClosed() { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testFlushAfterClosed() { String topicName = "testCommitTransactionAfterClosed"; FlinkKafkaInternalProducer kafkaProducer = getClosedProducer(topicName); - kafkaProducer.flush(); + assertThatThrownBy(kafkaProducer::flush) + .isInstanceOf(IllegalStateException.class); } - @Test(timeout = 30000L) - public void testProducerWhenCommitEmptyPartitionsToOutdatedTxnCoordinator() throws Exception { + @Test + @Timeout(value = 30L, unit = TimeUnit.SECONDS) + void testProducerWhenCommitEmptyPartitionsToOutdatedTxnCoordinator() throws Exception { String topic = "flink-kafka-producer-txn-coordinator-changed-" + UUID.randomUUID(); createTestTopic(topic, 1, 1); Producer kafkaProducer = new FlinkKafkaInternalProducer<>(extraProperties); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java index 4274fcff2..a269471b9 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java @@ -40,16 +40,18 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.util.ArrayList; import 
java.util.List; import java.util.Properties; +import java.util.concurrent.TimeUnit; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -58,16 +60,22 @@ import static org.mockito.Mockito.when; /** Tests for the {@link FlinkKafkaProducerBase}. */ -public class FlinkKafkaProducerBaseTest { +class FlinkKafkaProducerBaseTest { /** Tests that the constructor eagerly checks bootstrap servers are set in config. */ - @Test(expected = IllegalArgumentException.class) - public void testInstantiationFailsWhenBootstrapServersMissing() throws Exception { + @Test + void testInstantiationFailsWhenBootstrapServersMissing() throws Exception { // no bootstrap servers set in props Properties props = new Properties(); // should throw IllegalArgumentException - new DummyFlinkKafkaProducer<>( - props, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null); + assertThatThrownBy( + () -> + new DummyFlinkKafkaProducer<>( + props, + new KeyedSerializationSchemaWrapper<>( + new SimpleStringSchema()), + null)) + .isInstanceOf(IllegalArgumentException.class); } /** @@ -75,7 +83,7 @@ public void testInstantiationFailsWhenBootstrapServersMissing() throws Exception * deserializers if not set. */ @Test - public void testKeyValueDeserializersSetIfMissing() throws Exception { + void testKeyValueDeserializersSetIfMissing() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:12345"); // should set missing key value deserializers @@ -94,7 +102,7 @@ public void testKeyValueDeserializersSetIfMissing() throws Exception { /** Tests that partitions list is determinate and correctly provided to custom partitioner. */ @SuppressWarnings("unchecked") @Test - public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception { + void testPartitionerInvokedWithDeterminatePartitionList() throws Exception { FlinkKafkaPartitioner mockPartitioner = mock(FlinkKafkaPartitioner.class); RuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class); @@ -141,7 +149,7 @@ public void testPartitionerInvokedWithDeterminatePartitionList() throws Exceptio * should be rethrown. */ @Test - public void testAsyncErrorRethrownOnInvoke() throws Throwable { + void testAsyncErrorRethrownOnInvoke() throws Throwable { final DummyFlinkKafkaProducer producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), @@ -160,17 +168,9 @@ public void testAsyncErrorRethrownOnInvoke() throws Throwable { .get(0) .onCompletion(null, new Exception("artificial async exception")); - try { - testHarness.processElement(new StreamRecord<>("msg-2")); - } catch (Exception e) { - // the next invoke should rethrow the async exception - assertThat(e.getCause().getMessage()).contains("artificial async exception"); - - // test succeeded - return; - } - - fail("unknown failure"); + // the next invoke should rethrow the async exception + assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>("msg-2"))) + .hasStackTraceContaining("artificial async exception"); } /** @@ -178,7 +178,7 @@ public void testAsyncErrorRethrownOnInvoke() throws Throwable { * should be rethrown. 
*/ @Test - public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { + void testAsyncErrorRethrownOnCheckpoint() throws Throwable { final DummyFlinkKafkaProducer producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), @@ -197,17 +197,9 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { .get(0) .onCompletion(null, new Exception("artificial async exception")); - try { - testHarness.snapshot(123L, 123L); - } catch (Exception e) { - // the next invoke should rethrow the async exception - assertThat(e.getCause().getMessage()).contains("artificial async exception"); - - // test succeeded - return; - } - - fail("unknown failure"); + // the next invoke should rethrow the async exception + assertThatThrownBy(() -> testHarness.snapshot(123L, 123L)) + .hasStackTraceContaining("artificial async exception"); } /** @@ -219,8 +211,9 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { * pending records. The test for that is covered in testAtLeastOnceProducer. */ @SuppressWarnings("unchecked") - @Test(timeout = 5000) - public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable { + @Test + @Timeout(value = 5L, unit = TimeUnit.SECONDS) + void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable { final DummyFlinkKafkaProducer producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), @@ -261,18 +254,9 @@ public void go() throws Exception { .onCompletion(null, new Exception("artificial async failure for 2nd message")); producer.getPendingCallbacks().get(2).onCompletion(null, null); - try { - snapshotThread.sync(); - } catch (Exception e) { - // the snapshot should have failed with the async exception - assertThat(e.getCause().getMessage()) - .contains("artificial async failure for 2nd message"); - - // test succeeded - return; - } - - fail("unknown failure"); + // the snapshot should have failed with the async exception + assertThatThrownBy(() -> snapshotThread.sync()) + .hasStackTraceContaining("artificial async failure for 2nd message"); } /** @@ -280,8 +264,9 @@ public void go() throws Exception { * the test will not finish if the logic is broken. */ @SuppressWarnings("unchecked") - @Test(timeout = 10000) - public void testAtLeastOnceProducer() throws Throwable { + @Test + @Timeout(value = 10L, unit = TimeUnit.SECONDS) + void testAtLeastOnceProducer() throws Throwable { final DummyFlinkKafkaProducer producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), @@ -353,8 +338,9 @@ public void go() throws Exception { * records; we set a timeout because the test will not finish if the logic is broken. 
*/ @SuppressWarnings("unchecked") - @Test(timeout = 5000) - public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable { + @Test + @Timeout(value = 5L, unit = TimeUnit.SECONDS) + void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable { final DummyFlinkKafkaProducer producer = new DummyFlinkKafkaProducer<>( FakeStandardProducerConfig.get(), diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java index 7b345bf7d..50802b896 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java @@ -30,9 +30,9 @@ import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema; import org.apache.kafka.common.errors.ProducerFencedException; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.Arrays; @@ -57,7 +57,7 @@ *

Do not run this class in the same junit execution with other tests in your IDE. This may lead * leaking threads. */ -public class FlinkKafkaProducerITCase extends KafkaTestBase { +class FlinkKafkaProducerITCase extends KafkaTestBase { protected String transactionalId; protected Properties extraProperties; @@ -68,8 +68,8 @@ public class FlinkKafkaProducerITCase extends KafkaTestBase { protected KeyedSerializationSchema integerKeyedSerializationSchema = new KeyedSerializationSchemaWrapper<>(integerSerializationSchema); - @Before - public void before() { + @BeforeEach + void before() { transactionalId = UUID.randomUUID().toString(); extraProperties = new Properties(); extraProperties.putAll(standardProps); @@ -86,12 +86,12 @@ public void before() { } @Test - public void resourceCleanUpNone() throws Exception { + void resourceCleanUpNone() throws Exception { resourceCleanUp(FlinkKafkaProducer.Semantic.NONE); } @Test - public void resourceCleanUpAtLeastOnce() throws Exception { + void resourceCleanUpAtLeastOnce() throws Exception { resourceCleanUp(FlinkKafkaProducer.Semantic.AT_LEAST_ONCE); } @@ -125,7 +125,7 @@ public void resourceCleanUp(FlinkKafkaProducer.Semantic semantic) throws Excepti * will not clash with previous transactions using same transactional.ids. */ @Test - public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exception { + void testRestoreToCheckpointAfterExceedingProducersPool() throws Exception { String topic = "flink-kafka-producer-fail-before-notify"; try (OneInputStreamOperatorTestHarness testHarness1 = @@ -171,8 +171,8 @@ public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exceptio /** This test hangs when running it in your IDE. */ @Test - @Ignore - public void testFlinkKafkaProducerFailBeforeNotify() throws Exception { + @Disabled + void testFlinkKafkaProducerFailBeforeNotify() throws Exception { String topic = "flink-kafka-producer-fail-before-notify"; final OneInputStreamOperatorTestHarness testHarness = @@ -220,7 +220,7 @@ public void testFlinkKafkaProducerFailBeforeNotify() throws Exception { * committed records that were created after this lingering transaction. */ @Test - public void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception { + void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception { String topic = "flink-kafka-producer-fail-before-notify"; OneInputStreamOperatorTestHarness testHarness1 = createTestHarness(topic); @@ -274,7 +274,7 @@ public void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception { } @Test - public void testFailAndRecoverSameCheckpointTwice() throws Exception { + void testFailAndRecoverSameCheckpointTwice() throws Exception { String topic = "flink-kafka-producer-fail-and-recover-same-checkpoint-twice"; OperatorSubtaskState snapshot1; @@ -331,7 +331,7 @@ public void testFailAndRecoverSameCheckpointTwice() throws Exception { * read committed records that were created after this lingering transaction. */ @Test - public void testScaleDownBeforeFirstCheckpoint() throws Exception { + void testScaleDownBeforeFirstCheckpoint() throws Exception { String topic = "scale-down-before-first-checkpoint"; List operatorsToClose = new ArrayList<>(); @@ -401,7 +401,7 @@ public void testScaleDownBeforeFirstCheckpoint() throws Exception { * so it has to generate new ones that are greater then 4. 
*/ @Test - public void testScaleUpAfterScalingDown() throws Exception { + void testScaleUpAfterScalingDown() throws Exception { String topic = "scale-up-after-scaling-down"; final int parallelism1 = 4; @@ -531,7 +531,7 @@ private OperatorSubtaskState repartitionAndExecute( } @Test - public void testRecoverCommittedTransaction() throws Exception { + void testRecoverCommittedTransaction() throws Exception { String topic = "flink-kafka-producer-recover-committed-transaction"; OneInputStreamOperatorTestHarness testHarness = createTestHarness(topic); @@ -560,7 +560,7 @@ public void testRecoverCommittedTransaction() throws Exception { } @Test - public void testRunOutOfProducersInThePool() throws Exception { + void testRunOutOfProducersInThePool() throws Exception { String topic = "flink-kafka-run-out-of-producers"; try (OneInputStreamOperatorTestHarness testHarness = @@ -583,7 +583,7 @@ public void testRunOutOfProducersInThePool() throws Exception { } @Test - public void testMigrateFromAtLeastOnceToExactlyOnce() throws Exception { + void testMigrateFromAtLeastOnceToExactlyOnce() throws Exception { String topic = "testMigrateFromAtLeastOnceToExactlyOnce"; testRecoverWithChangeSemantics( topic, @@ -594,7 +594,7 @@ public void testMigrateFromAtLeastOnceToExactlyOnce() throws Exception { } @Test - public void testMigrateFromAtExactlyOnceToAtLeastOnce() throws Exception { + void testMigrateFromAtExactlyOnceToAtLeastOnce() throws Exception { String topic = "testMigrateFromExactlyOnceToAtLeastOnce"; testRecoverWithChangeSemantics( topic, @@ -605,7 +605,7 @@ public void testMigrateFromAtExactlyOnceToAtLeastOnce() throws Exception { } @Test - public void testDefaultTransactionalIdPrefix() throws Exception { + void testDefaultTransactionalIdPrefix() throws Exception { Properties properties = createProperties(); String topic = "testCustomizeTransactionalIdPrefix"; FlinkKafkaProducer kafkaProducer = @@ -642,7 +642,7 @@ public void testDefaultTransactionalIdPrefix() throws Exception { } @Test - public void testCustomizeTransactionalIdPrefix() throws Exception { + void testCustomizeTransactionalIdPrefix() throws Exception { String transactionalIdPrefix = "my-prefix"; Properties properties = createProperties(); @@ -675,7 +675,7 @@ public void testCustomizeTransactionalIdPrefix() throws Exception { } @Test - public void testRestoreUsingDifferentTransactionalIdPrefix() throws Exception { + void testRestoreUsingDifferentTransactionalIdPrefix() throws Exception { String topic = "testCustomizeTransactionalIdPrefix"; Properties properties = createProperties(); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationOperatorTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationOperatorTest.java index 5e87f04b8..3041d2e7c 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationOperatorTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationOperatorTest.java @@ -19,9 +19,9 @@ package org.apache.flink.streaming.connectors.kafka; import org.apache.flink.FlinkVersion; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; -import org.junit.Ignore; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Disabled; import java.util.Arrays; import java.util.Collection; @@ -34,8 +34,8 @@ * by the {@link 
#getOperatorSnapshotPath(FlinkVersion)} method then copy the resource to the path * also specified by the {@link #getOperatorSnapshotPath(FlinkVersion)} method. */ -public class FlinkKafkaProducerMigrationOperatorTest extends FlinkKafkaProducerMigrationTest { - @Parameterized.Parameters(name = "Migration Savepoint: {0}") +class FlinkKafkaProducerMigrationOperatorTest extends FlinkKafkaProducerMigrationTest { + @Parameters(name = "Migration Savepoint: {0}") public static Collection parameters() { return Arrays.asList( FlinkVersion.v1_8, FlinkVersion.v1_9, FlinkVersion.v1_10, FlinkVersion.v1_11); @@ -52,7 +52,7 @@ public String getOperatorSnapshotPath(FlinkVersion version) { + "-snapshot"; } - @Ignore + @Disabled @Override public void writeSnapshot() throws Exception { throw new UnsupportedOperationException(); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationTest.java index 98ab88296..9ea613e89 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerMigrationTest.java @@ -23,10 +23,11 @@ import org.apache.flink.runtime.jobgraph.OperatorID; import org.apache.flink.streaming.api.operators.StreamSink; import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; import org.apache.kafka.clients.producer.ProducerConfig; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.extension.ExtendWith; import java.util.Collection; import java.util.Properties; @@ -38,9 +39,9 @@ *

For regenerating the binary snapshot files run {@link #writeSnapshot()} on the corresponding * Flink release-* branch. */ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class FlinkKafkaProducerMigrationTest extends KafkaMigrationTestBase { - @Parameterized.Parameters(name = "Migration Savepoint: {0}") + @Parameters(name = "Migration Savepoint: {0}") public static Collection parameters() { return FlinkVersion.rangeOf(FlinkVersion.v1_8, FlinkVersion.v1_16); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java index 6fedcc43c..4fb380820 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java @@ -26,7 +26,7 @@ import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness; import org.apache.kafka.clients.producer.ProducerRecord; -import org.junit.Test; +import org.junit.jupiter.api.Test; import javax.annotation.Nullable; @@ -34,11 +34,12 @@ import java.util.Properties; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link FlinkKafkaProducer}. */ -public class FlinkKafkaProducerTest { +class FlinkKafkaProducerTest { @Test - public void testOpenSerializationSchemaProducer() throws Exception { + void testOpenSerializationSchemaProducer() throws Exception { OpenTestingSerializationSchema schema = new OpenTestingSerializationSchema(); FlinkKafkaProducer kafkaProducer = new FlinkKafkaProducer<>("localhost:9092", "test-topic", schema); @@ -58,7 +59,7 @@ public void testOpenSerializationSchemaProducer() throws Exception { } @Test - public void testOpenKafkaSerializationSchemaProducer() throws Exception { + void testOpenKafkaSerializationSchemaProducer() throws Exception { OpenTestingKafkaSerializationSchema schema = new OpenTestingKafkaSerializationSchema(); Properties properties = new Properties(); properties.put("bootstrap.servers", "localhost:9092"); @@ -84,7 +85,7 @@ public void testOpenKafkaSerializationSchemaProducer() throws Exception { } @Test - public void testOpenKafkaCustomPartitioner() throws Exception { + void testOpenKafkaCustomPartitioner() throws Exception { CustomPartitioner partitioner = new CustomPartitioner<>(); Properties properties = new Properties(); properties.put("bootstrap.servers", "localhost:9092"); @@ -109,12 +110,13 @@ public void testOpenKafkaCustomPartitioner() throws Exception { assertThat(partitioner.openCalled).isTrue(); } - @Test(expected = NullPointerException.class) + @Test public void testProvidedNullTransactionalIdPrefix() { FlinkKafkaProducer kafkaProducer = new FlinkKafkaProducer<>( "localhost:9092", "test-topic", new OpenTestingSerializationSchema()); - kafkaProducer.setTransactionalIdPrefix(null); + assertThatThrownBy(() -> kafkaProducer.setTransactionalIdPrefix(null)) + .isInstanceOf(NullPointerException.class); } private static class CustomPartitioner extends FlinkKafkaPartitioner { diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java index a5abb5e6d..e42e7dd0b 
100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java @@ -24,17 +24,17 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; /** Tests for the{@link JSONKeyValueDeserializationSchema}. */ -public class JSONKeyValueDeserializationSchemaTest { +class JSONKeyValueDeserializationSchemaTest { private static final ObjectMapper OBJECT_MAPPER = JacksonMapperFactory.createObjectMapper(); @Test - public void testDeserializeWithoutMetadata() throws Exception { + void testDeserializeWithoutMetadata() throws Exception { ObjectNode initialKey = OBJECT_MAPPER.createObjectNode(); initialKey.put("index", 4); byte[] serializedKey = OBJECT_MAPPER.writeValueAsBytes(initialKey); @@ -54,7 +54,7 @@ public void testDeserializeWithoutMetadata() throws Exception { } @Test - public void testDeserializeWithoutKey() throws Exception { + void testDeserializeWithoutKey() throws Exception { byte[] serializedKey = null; ObjectNode initialValue = OBJECT_MAPPER.createObjectNode(); @@ -87,7 +87,7 @@ private static ConsumerRecord newConsumerRecord( } @Test - public void testDeserializeWithoutValue() throws Exception { + void testDeserializeWithoutValue() throws Exception { ObjectNode initialKey = OBJECT_MAPPER.createObjectNode(); initialKey.put("index", 4); byte[] serializedKey = OBJECT_MAPPER.writeValueAsBytes(initialKey); @@ -105,7 +105,7 @@ public void testDeserializeWithoutValue() throws Exception { } @Test - public void testDeserializeWithMetadata() throws Exception { + void testDeserializeWithMetadata() throws Exception { ObjectNode initialKey = OBJECT_MAPPER.createObjectNode(); initialKey.put("index", 4); byte[] serializedKey = OBJECT_MAPPER.writeValueAsBytes(initialKey); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java index 88f6ac607..a38d5e975 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java @@ -88,7 +88,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.NotLeaderForPartitionException; import org.apache.kafka.common.errors.TimeoutException; -import org.junit.Before; +import org.junit.jupiter.api.BeforeEach; import javax.annotation.Nullable; import javax.management.MBeanServer; @@ -143,8 +143,8 @@ protected KafkaConsumerTestBase(boolean useNewSource) { * Makes sure that no job is on the JobManager any more from any previous tests that use the * same mini cluster. Otherwise, missing slots may happen. 
*/ - @Before - public void setClientAndEnsureNoJobIsLingering() throws Exception { + @BeforeEach + void setClientAndEnsureNoJobIsLingering() throws Exception { client = flink.getClusterClient(); waitUntilNoJobIsRunning(client); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaITCase.java index 68db69187..fa2bf98c6 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaITCase.java @@ -38,8 +38,9 @@ import org.apache.flink.streaming.runtime.streamrecord.StreamRecord; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import javax.annotation.Nullable; @@ -47,12 +48,13 @@ import java.io.IOException; import java.util.Optional; import java.util.UUID; +import java.util.concurrent.TimeUnit; /** IT cases for Kafka. */ -public class KafkaITCase extends KafkaConsumerTestBase { +class KafkaITCase extends KafkaConsumerTestBase { - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { KafkaProducerTestBase.prepare(); ((KafkaTestEnvironmentImpl) kafkaServer) .setProducerSemantic(FlinkKafkaProducer.Semantic.AT_LEAST_ONCE); @@ -62,131 +64,154 @@ public static void prepare() throws Exception { // Suite of Tests // ------------------------------------------------------------------------ - @Test(timeout = 120000) - public void testFailOnNoBroker() throws Exception { + @Test + @Timeout(value = 120L, unit = TimeUnit.SECONDS) + void testFailOnNoBroker() throws Exception { runFailOnNoBrokerTest(); } - @Test(timeout = 60000) - public void testConcurrentProducerConsumerTopology() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testConcurrentProducerConsumerTopology() throws Exception { runSimpleConcurrentProducerConsumerTopology(); } - @Test(timeout = 60000) - public void testKeyValueSupport() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testKeyValueSupport() throws Exception { runKeyValueTest(); } // --- canceling / failures --- - @Test(timeout = 60000) - public void testCancelingEmptyTopic() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testCancelingEmptyTopic() throws Exception { runCancelingOnEmptyInputTest(); } - @Test(timeout = 60000) - public void testCancelingFullTopic() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testCancelingFullTopic() throws Exception { runCancelingOnFullInputTest(); } // --- source to partition mappings and exactly once --- - @Test(timeout = 60000) - public void testOneToOneSources() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testOneToOneSources() throws Exception { runOneToOneExactlyOnceTest(); } - @Test(timeout = 60000) - public void testOneSourceMultiplePartitions() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testOneSourceMultiplePartitions() throws Exception { runOneSourceMultiplePartitionsExactlyOnceTest(); } - @Test(timeout = 60000) - public void testMultipleSourcesOnePartition() throws Exception { + @Test + 
@Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testMultipleSourcesOnePartition() throws Exception { runMultipleSourcesOnePartitionExactlyOnceTest(); } // --- broker failure --- - @Test(timeout = 60000) - public void testBrokerFailure() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testBrokerFailure() throws Exception { runBrokerFailureTest(); } // --- special executions --- - @Test(timeout = 60000) - public void testBigRecordJob() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testBigRecordJob() throws Exception { runBigRecordTestTopology(); } - @Test(timeout = 60000) - public void testMultipleTopicsWithLegacySerializer() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testMultipleTopicsWithLegacySerializer() throws Exception { runProduceConsumeMultipleTopics(true); } - @Test(timeout = 60000) - public void testMultipleTopicsWithKafkaSerializer() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testMultipleTopicsWithKafkaSerializer() throws Exception { runProduceConsumeMultipleTopics(false); } - @Test(timeout = 60000) - public void testAllDeletes() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testAllDeletes() throws Exception { runAllDeletesTest(); } - @Test(timeout = 60000) - public void testMetricsAndEndOfStream() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testMetricsAndEndOfStream() throws Exception { runEndOfStreamTest(); } // --- startup mode --- - @Test(timeout = 60000) - public void testStartFromEarliestOffsets() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testStartFromEarliestOffsets() throws Exception { runStartFromEarliestOffsets(); } - @Test(timeout = 60000) - public void testStartFromLatestOffsets() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testStartFromLatestOffsets() throws Exception { runStartFromLatestOffsets(); } - @Test(timeout = 60000) - public void testStartFromGroupOffsets() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testStartFromGroupOffsets() throws Exception { runStartFromGroupOffsets(); } - @Test(timeout = 60000) - public void testStartFromSpecificOffsets() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testStartFromSpecificOffsets() throws Exception { runStartFromSpecificOffsets(); } - @Test(timeout = 60000) - public void testStartFromTimestamp() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testStartFromTimestamp() throws Exception { runStartFromTimestamp(); } // --- offset committing --- - @Test(timeout = 60000) - public void testCommitOffsetsToKafka() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testCommitOffsetsToKafka() throws Exception { runCommitOffsetsToKafka(); } - @Test(timeout = 60000) - public void testAutoOffsetRetrievalAndCommitToKafka() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testAutoOffsetRetrievalAndCommitToKafka() throws Exception { runAutoOffsetRetrievalAndCommitToKafka(); } - @Test(timeout = 60000) - public void testCollectingSchema() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testCollectingSchema() throws Exception { runCollectingSchemaTest(); } /** Kafka 20 specific test, ensuring Timestamps are properly written 
to and read from Kafka. */ - @Test(timeout = 60000) - public void testTimestamps() throws Exception { + @Test + @Timeout(value = 60L, unit = TimeUnit.SECONDS) + void testTimestamps() throws Exception { final String topic = "tstopic-" + UUID.randomUUID(); createTestTopic(topic, 3, 1); @@ -204,7 +229,7 @@ public void testTimestamps() throws Exception { boolean running = true; @Override - public void run(SourceContext ctx) throws Exception { + public void run(SourceContext ctx) { long i = 0; while (running) { ctx.collectWithTimestamp(i, i * 2); @@ -368,8 +393,7 @@ public Long deserialize(ConsumerRecord record) throws IOExceptio cnt++; DataInputView in = new DataInputViewStreamWrapper(new ByteArrayInputStream(record.value())); - Long e = ser.deserialize(in); - return e; + return ser.deserialize(in); } @Override diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaMigrationTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaMigrationTestBase.java index b08bb05e3..60cc247ae 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaMigrationTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaMigrationTestBase.java @@ -27,10 +27,10 @@ import org.apache.flink.streaming.util.OperatorSnapshotUtil; import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,20 +79,20 @@ public String getOperatorSnapshotPath(FlinkVersion version) { * Override {@link KafkaTestBase}. Kafka Migration Tests are starting up Kafka/ZooKeeper cluster * manually */ - @BeforeClass - public static void prepare() throws Exception {} + @BeforeAll + protected static void prepare() throws Exception {} /** * Override {@link KafkaTestBase}. Kafka Migration Tests are starting up Kafka/ZooKeeper cluster * manually */ - @AfterClass - public static void shutDownServices() throws Exception {} + @AfterAll + protected static void shutDownServices() throws Exception {} /** Manually run this to write binary snapshot data. 
*/ - @Ignore + @Disabled @Test - public void writeSnapshot() throws Exception { + void writeSnapshot() throws Exception { try { checkState(flinkGenerateSavepointVersion.isPresent()); startClusters(); @@ -129,7 +129,7 @@ private OperatorSubtaskState initializeTestState() throws Exception { @SuppressWarnings("warning") @Test - public void testRestoreProducer() throws Exception { + void testRestoreProducer() throws Exception { try { startClusters(); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerAtLeastOnceITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerAtLeastOnceITCase.java index aae2680bd..405a371c8 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerAtLeastOnceITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerAtLeastOnceITCase.java @@ -18,14 +18,14 @@ package org.apache.flink.streaming.connectors.kafka; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; /** IT cases for the {@link FlinkKafkaProducer}. */ @SuppressWarnings("serial") -public class KafkaProducerAtLeastOnceITCase extends KafkaProducerTestBase { +class KafkaProducerAtLeastOnceITCase extends KafkaProducerTestBase { - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { KafkaProducerTestBase.prepare(); ((KafkaTestEnvironmentImpl) kafkaServer) .setProducerSemantic(FlinkKafkaProducer.Semantic.AT_LEAST_ONCE); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerExactlyOnceITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerExactlyOnceITCase.java index f8b20ae02..2a1525b60 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerExactlyOnceITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerExactlyOnceITCase.java @@ -18,21 +18,21 @@ package org.apache.flink.streaming.connectors.kafka; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** IT cases for the {@link FlinkKafkaProducer}. 
*/ @SuppressWarnings("serial") -public class KafkaProducerExactlyOnceITCase extends KafkaProducerTestBase { - @BeforeClass - public static void prepare() throws Exception { +class KafkaProducerExactlyOnceITCase extends KafkaProducerTestBase { + @BeforeAll + protected static void prepare() throws Exception { KafkaProducerTestBase.prepare(); ((KafkaTestEnvironmentImpl) kafkaServer) .setProducerSemantic(FlinkKafkaProducer.Semantic.EXACTLY_ONCE); } @Test - public void testMultipleSinkOperators() throws Exception { + void testMultipleSinkOperators() throws Exception { testExactlyOnce(false, 2); } } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java index 624381068..bd09fd8db 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java @@ -40,7 +40,7 @@ import org.apache.flink.test.util.TestUtils; import org.apache.flink.util.Preconditions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.Serializable; import java.util.ArrayList; @@ -87,7 +87,7 @@ public abstract class KafkaProducerTestBase extends KafkaTestBaseWithFlink { * partitions are present. */ @Test - public void testCustomPartitioning() { + void testCustomPartitioning() { try { LOG.info("Starting KafkaProducerITCase.testCustomPartitioning()"); @@ -204,13 +204,13 @@ public void cancel() { /** Tests the exactly-once semantic for the simple writes into Kafka. */ @Test - public void testExactlyOnceRegularSink() throws Exception { + void testExactlyOnceRegularSink() throws Exception { testExactlyOnce(true, 1); } /** Tests the exactly-once semantic for the simple writes into Kafka. 
*/ @Test - public void testExactlyOnceCustomOperator() throws Exception { + void testExactlyOnceCustomOperator() throws Exception { testExactlyOnce(false, 1); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java index 9fb16d40c..b201a5c15 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java @@ -34,12 +34,11 @@ import org.apache.flink.util.InstantiationUtil; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testcontainers.junit.jupiter.Container; import java.io.Serializable; import java.util.Properties; @@ -52,7 +51,7 @@ * can make sure our consumer is properly handling cases where we run into out of offset errors */ @SuppressWarnings("serial") -public class KafkaShortRetentionTestBase implements Serializable { +class KafkaShortRetentionTestBase implements Serializable { protected static final Logger LOG = LoggerFactory.getLogger(KafkaShortRetentionTestBase.class); @@ -65,7 +64,7 @@ public class KafkaShortRetentionTestBase implements Serializable { private static KafkaTestEnvironment kafkaServer; private static Properties standardProps; - @ClassRule + @Container public static MiniClusterWithClientResource flink = new MiniClusterWithClientResource( new MiniClusterResourceConfiguration.Builder() @@ -74,8 +73,6 @@ public class KafkaShortRetentionTestBase implements Serializable { .setNumberSlotsPerTaskManager(TM_SLOTS) .build()); - @ClassRule public static TemporaryFolder tempFolder = new TemporaryFolder(); - protected static Properties secureProps = new Properties(); private static Configuration getConfiguration() { @@ -84,8 +81,8 @@ private static Configuration getConfiguration() { return flinkConfig; } - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + static void prepare() throws Exception { LOG.info("-------------------------------------------------------------------------"); LOG.info(" Starting KafkaShortRetentionTestBase "); LOG.info("-------------------------------------------------------------------------"); @@ -113,8 +110,8 @@ public static void prepare() throws Exception { standardProps = kafkaServer.getStandardProperties(); } - @AfterClass - public static void shutDownServices() throws Exception { + @AfterAll + static void shutDownServices() throws Exception { kafkaServer.shutdown(); secureProps.clear(); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java index deafb7d6d..6dacb1e17 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java @@ -28,21 +28,18 @@ import org.apache.flink.streaming.util.TestStreamEnvironment; import org.apache.flink.test.util.SuccessException; import 
org.apache.flink.testutils.junit.RetryOnFailure; -import org.apache.flink.testutils.junit.RetryRule; import org.apache.flink.util.InstantiationUtil; -import org.apache.flink.util.TestLogger; import com.google.common.base.MoreObjects; +import org.apache.flink.util.TestLoggerExtension; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerRecord; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.extension.ExtendWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,7 +76,8 @@ */ @SuppressWarnings("serial") @RetryOnFailure(times = 3) -public abstract class KafkaTestBase extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +public abstract class KafkaTestBase { public static final Logger LOG = LoggerFactory.getLogger(KafkaTestBase.class); @@ -96,18 +94,14 @@ public abstract class KafkaTestBase extends TestLogger { public static List kafkaClusters = new ArrayList<>(); - @ClassRule public static TemporaryFolder tempFolder = new TemporaryFolder(); - public static Properties secureProps = new Properties(); - @Rule public final RetryRule retryRule = new RetryRule(); - // ------------------------------------------------------------------------ // Setup and teardown of the mini clusters // ------------------------------------------------------------------------ - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { LOG.info("-------------------------------------------------------------------------"); LOG.info(" Starting KafkaTestBase "); LOG.info("-------------------------------------------------------------------------"); @@ -115,8 +109,8 @@ public static void prepare() throws Exception { startClusters(false, numKafkaClusters); } - @AfterClass - public static void shutDownServices() throws Exception { + @AfterAll + protected static void shutDownServices() throws Exception { LOG.info("-------------------------------------------------------------------------"); LOG.info(" Shut down KafkaTestBase "); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBaseWithFlink.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBaseWithFlink.java index 1c8d651f0..163b25cb2 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBaseWithFlink.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBaseWithFlink.java @@ -20,7 +20,7 @@ import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration; import org.apache.flink.test.util.MiniClusterWithClientResource; -import org.junit.ClassRule; +import org.testcontainers.junit.jupiter.Container; /** The base for the Kafka tests with Flink's MiniCluster. 
*/ @SuppressWarnings("serial") @@ -30,7 +30,7 @@ public abstract class KafkaTestBaseWithFlink extends KafkaTestBase { protected static final int TM_SLOTS = 8; - @ClassRule + @Container public static MiniClusterWithClientResource flink = new MiniClusterWithClientResource( new MiniClusterResourceConfiguration.Builder() diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java index 92fc08eb7..7572c71d2 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java @@ -28,7 +28,7 @@ import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService; import org.apache.flink.util.SerializedValue; -import org.junit.Test; +import org.junit.jupiter.api.Test; import javax.annotation.Nonnull; @@ -43,10 +43,10 @@ /** Tests for the {@link AbstractFetcher}. */ @SuppressWarnings("serial") -public class AbstractFetcherTest { +class AbstractFetcherTest { @Test - public void testIgnorePartitionStateSentinelInSnapshot() throws Exception { + void testIgnorePartitionStateSentinelInSnapshot() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( @@ -93,7 +93,7 @@ public void onException(Throwable cause) { // ------------------------------------------------------------------------ @Test - public void testSkipCorruptedRecord() throws Exception { + void testSkipCorruptedRecord() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( @@ -127,7 +127,7 @@ public void testSkipCorruptedRecord() throws Exception { } @Test - public void testConcurrentPartitionsDiscoveryAndLoopFetching() throws Exception { + void testConcurrentPartitionsDiscoveryAndLoopFetching() throws Exception { // test data final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java index 6cd3b6453..f07fa2349 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java @@ -31,12 +31,13 @@ import org.apache.flink.streaming.runtime.operators.util.AssignerWithPunctuatedWatermarksAdapter; import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService; import org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameter; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; import org.apache.flink.util.SerializedValue; -import org.junit.Test; -import org.junit.experimental.runners.Enclosed; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; 
import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -54,14 +55,13 @@ /** Tests for the watermarking behaviour of {@link AbstractFetcher}. */ @SuppressWarnings("serial") -@RunWith(Enclosed.class) -public class AbstractFetcherWatermarksTest { +class AbstractFetcherWatermarksTest { /** Tests with watermark generators that have a periodic nature. */ - @RunWith(Parameterized.class) + @ExtendWith(ParameterizedTestExtension.class) public static class PeriodicWatermarksSuite { - @Parameterized.Parameters + @Parameters public static Collection> getParams() { return Arrays.asList( new AssignerWithPeriodicWatermarksAdapter.Strategy<>( @@ -70,10 +70,10 @@ public static Collection> getParams() { .withTimestampAssigner((event, previousTimestamp) -> event)); } - @Parameterized.Parameter public WatermarkStrategy testWmStrategy; + @Parameter public WatermarkStrategy testWmStrategy; @Test - public void testPeriodicWatermarks() throws Exception { + void testPeriodicWatermarks() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( @@ -161,7 +161,7 @@ public void testPeriodicWatermarks() throws Exception { } @Test - public void testSkipCorruptedRecordWithPeriodicWatermarks() throws Exception { + void testSkipCorruptedRecordWithPeriodicWatermarks() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( @@ -212,7 +212,7 @@ public void testSkipCorruptedRecordWithPeriodicWatermarks() throws Exception { } @Test - public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWatermarks() + void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWatermarks() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); @@ -248,7 +248,7 @@ public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWaterma public static class PunctuatedWatermarksSuite { @Test - public void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { + void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( @@ -296,7 +296,7 @@ public void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { } @Test - public void testPunctuatedWatermarks() throws Exception { + void testPunctuatedWatermarks() throws Exception { final String testTopic = "test topic name"; Map originalPartitions = new HashMap<>(); originalPartitions.put( diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java index b47534596..35bace6f2 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java @@ -19,10 +19,11 @@ package org.apache.flink.streaming.connectors.kafka.internals; import org.apache.flink.streaming.connectors.kafka.testutils.TestPartitionDiscoverer; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; -import org.junit.Test; -import 
org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.util.ArrayList; import java.util.Arrays; @@ -40,7 +41,7 @@ * Tests that the partition assignment in the partition discoverer is deterministic and stable, with * both fixed and growing partitions. */ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class AbstractPartitionDiscovererTest { private static final String TEST_TOPIC = "test-topic"; @@ -52,7 +53,7 @@ public AbstractPartitionDiscovererTest(KafkaTopicsDescriptor topicsDescriptor) { this.topicsDescriptor = topicsDescriptor; } - @Parameterized.Parameters(name = "KafkaTopicsDescriptor = {0}") + @Parameters(name = "KafkaTopicsDescriptor = {0}") @SuppressWarnings("unchecked") public static Collection timeCharacteristic() { return Arrays.asList( @@ -65,7 +66,7 @@ public static Collection timeCharacteristic() { } @Test - public void testPartitionsEqualConsumersFixedPartitions() throws Exception { + void testPartitionsEqualConsumersFixedPartitions() throws Exception { List mockGetAllPartitionsForTopicsReturn = Arrays.asList( new KafkaTopicPartition(TEST_TOPIC, 0), @@ -117,7 +118,7 @@ public void testPartitionsEqualConsumersFixedPartitions() throws Exception { } @Test - public void testMultiplePartitionsPerConsumersFixedPartitions() { + void testMultiplePartitionsPerConsumersFixedPartitions() { try { final int[] partitionIDs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; @@ -186,7 +187,7 @@ public void testMultiplePartitionsPerConsumersFixedPartitions() { } @Test - public void testPartitionsFewerThanConsumersFixedPartitions() { + void testPartitionsFewerThanConsumersFixedPartitions() { try { List mockGetAllPartitionsForTopicsReturn = Arrays.asList( @@ -248,7 +249,7 @@ public void testPartitionsFewerThanConsumersFixedPartitions() { } @Test - public void testGrowingPartitions() { + void testGrowingPartitions() { try { final int[] newPartitionIDs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; List allPartitions = new ArrayList<>(11); @@ -417,7 +418,7 @@ public void testGrowingPartitions() { } @Test - public void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() + void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() throws Exception { int numSubtasks = 4; @@ -478,7 +479,7 @@ public void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() } @Test - public void testNonContiguousPartitionIdDiscovery() throws Exception { + void testNonContiguousPartitionIdDiscovery() throws Exception { List mockGetAllPartitionsForTopicsReturn1 = Arrays.asList( new KafkaTopicPartition("test-topic", 1), diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java index 8697b1486..27f91d510 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java @@ -18,7 +18,7 @@ package org.apache.flink.streaming.connectors.kafka.internals; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.List; @@ -27,17 +27,18 @@ import static java.util.Arrays.asList; import static 
org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; /** Tests for the {@link ClosableBlockingQueue}. */ -public class ClosableBlockingQueueTest { +class ClosableBlockingQueueTest { // ------------------------------------------------------------------------ // single-threaded unit tests // ------------------------------------------------------------------------ @Test - public void testCreateQueueHashCodeEquals() { + void testCreateQueueHashCodeEquals() { try { ClosableBlockingQueue queue1 = new ClosableBlockingQueue<>(); ClosableBlockingQueue queue2 = new ClosableBlockingQueue<>(22); @@ -46,10 +47,10 @@ public void testCreateQueueHashCodeEquals() { assertThat(queue2.isOpen()).isTrue(); assertThat(queue1.isEmpty()).isTrue(); assertThat(queue2.isEmpty()).isTrue(); - assertThat(queue1.size()).isEqualTo(0); - assertThat(queue2.size()).isEqualTo(0); + assertThat(queue1.size()).isZero(); + assertThat(queue2.size()).isZero(); - assertThat(queue1.hashCode()).isEqualTo(queue2.hashCode()); + assertThat(queue1).hasSameHashCodeAs(queue2); //noinspection EqualsWithItself assertThat(queue1.equals(queue1)).isTrue(); //noinspection EqualsWithItself @@ -91,7 +92,7 @@ public void testCreateQueueHashCodeEquals() { } @Test - public void testCloseEmptyQueue() { + void testCloseEmptyQueue() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); assertThat(queue.isOpen()).isTrue(); @@ -101,12 +102,7 @@ public void testCloseEmptyQueue() { assertThat(queue.addIfOpen("element")).isFalse(); assertThat(queue.isEmpty()).isTrue(); - try { - queue.add("some element"); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.add("some element")).isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -114,7 +110,7 @@ public void testCloseEmptyQueue() { } @Test - public void testCloseNonEmptyQueue() { + void testCloseNonEmptyQueue() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(asList(1, 2, 3)); assertThat(queue.isOpen()).isTrue(); @@ -135,12 +131,7 @@ public void testCloseNonEmptyQueue() { assertThat(queue.addIfOpen(42)).isFalse(); assertThat(queue.isEmpty()).isTrue(); - try { - queue.add(99); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.add(99)).isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -148,7 +139,7 @@ public void testCloseNonEmptyQueue() { } @Test - public void testPeekAndPoll() { + void testPeekAndPoll() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); @@ -157,7 +148,7 @@ public void testPeekAndPoll() { assertThat(queue.poll()).isNull(); assertThat(queue.poll()).isNull(); - assertThat(queue.size()).isEqualTo(0); + assertThat(queue.size()).isZero(); queue.add("a"); queue.add("b"); @@ -181,26 +172,15 @@ public void testPeekAndPoll() { assertThat(queue.poll()).isEqualTo("c"); - assertThat(queue.size()).isEqualTo(0); + assertThat(queue.size()).isZero(); assertThat(queue.poll()).isNull(); assertThat(queue.peek()).isNull(); assertThat(queue.peek()).isNull(); assertThat(queue.close()).isTrue(); - try { - queue.peek(); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } - - try { - queue.poll(); - fail("should cause an exception"); - 
} catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.peek()).isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> queue.poll()).isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -208,7 +188,7 @@ public void testPeekAndPoll() { } @Test - public void testPollBatch() { + void testPollBatch() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); @@ -227,12 +207,7 @@ public void testPollBatch() { assertThat(queue.close()).isTrue(); - try { - queue.pollBatch(); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.pollBatch()).isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -240,7 +215,7 @@ public void testPollBatch() { } @Test - public void testGetElementBlocking() { + void testGetElementBlocking() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); @@ -248,7 +223,7 @@ public void testGetElementBlocking() { assertThat(queue.getElementBlocking(3)).isNull(); assertThat(queue.getElementBlocking(2)).isNull(); - assertThat(queue.size()).isEqualTo(0); + assertThat(queue.size()).isZero(); queue.add("a"); queue.add("b"); @@ -269,7 +244,7 @@ public void testGetElementBlocking() { assertThat(queue.getElementBlocking()).isEqualTo("e"); assertThat(queue.getElementBlocking(1786598)).isEqualTo("f"); - assertThat(queue.size()).isEqualTo(0); + assertThat(queue.size()).isZero(); assertThat(queue.getElementBlocking(1)).isNull(); assertThat(queue.getElementBlocking(3)).isNull(); @@ -277,19 +252,9 @@ public void testGetElementBlocking() { assertThat(queue.close()).isTrue(); - try { - queue.getElementBlocking(); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } - - try { - queue.getElementBlocking(1000000000L); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.getElementBlocking()).isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> queue.getElementBlocking(1000000000L)) + .isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -297,7 +262,7 @@ public void testGetElementBlocking() { } @Test - public void testGetBatchBlocking() { + void testGetBatchBlocking() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); @@ -333,19 +298,9 @@ public void testGetBatchBlocking() { assertThat(queue.close()).isTrue(); - try { - queue.getBatchBlocking(); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } - - try { - queue.getBatchBlocking(1000000000L); - fail("should cause an exception"); - } catch (IllegalStateException ignored) { - // expected - } + assertThatThrownBy(() -> queue.getBatchBlocking()).isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> queue.getBatchBlocking(1000000000L)) + .isInstanceOf(IllegalStateException.class); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -357,7 +312,7 @@ public void testGetBatchBlocking() { // ------------------------------------------------------------------------ @Test - public void notifyOnClose() { + void notifyOnClose() { try { final long oneYear = 365L * 24 * 60 * 60 * 1000; @@ -412,7 +367,7 @@ public void call() throws Exception { @SuppressWarnings("ThrowableResultOfMethodCallIgnored") 
@Test - public void testMultiThreadedAddGet() { + void testMultiThreadedAddGet() { try { final ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); final AtomicReference pushErrorRef = new AtomicReference<>(); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java index 30e651691..2c0826837 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java @@ -18,7 +18,7 @@ package org.apache.flink.streaming.connectors.kafka.internals; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.lang.reflect.Field; import java.lang.reflect.Modifier; @@ -27,10 +27,10 @@ import static org.assertj.core.api.Assertions.fail; /** Tests for the {@link KafkaTopicPartition}. */ -public class KafkaTopicPartitionTest { +class KafkaTopicPartitionTest { @Test - public void validateUid() { + void validateUid() { Field uidField; try { uidField = KafkaTopicPartition.class.getDeclaredField("serialVersionUID"); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java index bb029d85e..1fc998549 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java @@ -17,9 +17,11 @@ package org.apache.flink.streaming.connectors.kafka.internals; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.util.Arrays; import java.util.Collection; @@ -29,10 +31,10 @@ import static org.assertj.core.api.Assertions.assertThat; /** Tests for the {@link KafkaTopicsDescriptor}. 
*/ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class KafkaTopicsDescriptorTest { - @Parameterized.Parameters + @Parameters public static Collection data() { return Arrays.asList( new Object[][] { @@ -57,7 +59,7 @@ public KafkaTopicsDescriptorTest( } @Test - public void testIsMatchingTopic() { + void testIsMatchingTopic() { KafkaTopicsDescriptor topicsDescriptor = new KafkaTopicsDescriptor(fixedTopics, topicPattern); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleExactlyOnceITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleExactlyOnceITCase.java index 7d37f6c34..4f369d70c 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleExactlyOnceITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleExactlyOnceITCase.java @@ -27,11 +27,11 @@ import org.apache.flink.streaming.connectors.kafka.testutils.FailingIdentityMapper; import org.apache.flink.streaming.connectors.kafka.testutils.ValidatingExactlyOnceSink; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static org.apache.flink.streaming.api.TimeCharacteristic.EventTime; import static org.apache.flink.streaming.api.TimeCharacteristic.IngestionTime; @@ -39,9 +39,8 @@ import static org.apache.flink.test.util.TestUtils.tryExecute; /** Failure Recovery IT Test for KafkaShuffle. */ -public class KafkaShuffleExactlyOnceITCase extends KafkaShuffleTestBase { - - @Rule public final Timeout timeout = Timeout.millis(600000L); +@Timeout(value = 600000L, unit = TimeUnit.MILLISECONDS) +class KafkaShuffleExactlyOnceITCase extends KafkaShuffleTestBase { /** * Failure Recovery after processing 2/3 data with time characteristic: ProcessingTime. *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testFailureRecoveryProcessingTime() throws Exception { + void testFailureRecoveryProcessingTime() throws Exception { testKafkaShuffleFailureRecovery(1000, ProcessingTime); } @@ -59,7 +58,7 @@ public void testFailureRecoveryProcessingTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testFailureRecoveryIngestionTime() throws Exception { + void testFailureRecoveryIngestionTime() throws Exception { testKafkaShuffleFailureRecovery(1000, IngestionTime); } @@ -69,7 +68,7 @@ public void testFailureRecoveryIngestionTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testFailureRecoveryEventTime() throws Exception { + void testFailureRecoveryEventTime() throws Exception { testKafkaShuffleFailureRecovery(1000, EventTime); } @@ -79,7 +78,7 @@ public void testFailureRecoveryEventTime() throws Exception { *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionFailureRecoveryProcessingTime() throws Exception { + void testAssignedToPartitionFailureRecoveryProcessingTime() throws Exception { testAssignedToPartitionFailureRecovery(500, ProcessingTime); } @@ -89,7 +88,7 @@ public void testAssignedToPartitionFailureRecoveryProcessingTime() throws Except *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionFailureRecoveryIngestionTime() throws Exception { + void testAssignedToPartitionFailureRecoveryIngestionTime() throws Exception { testAssignedToPartitionFailureRecovery(500, IngestionTime); } @@ -99,7 +98,7 @@ public void testAssignedToPartitionFailureRecoveryIngestionTime() throws Excepti *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionFailureRecoveryEventTime() throws Exception { + void testAssignedToPartitionFailureRecoveryEventTime() throws Exception { testAssignedToPartitionFailureRecovery(500, EventTime); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java index 5505bdde3..d274d189e 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java @@ -35,9 +35,8 @@ import org.apache.flink.util.PropertiesUtil; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.util.ArrayList; import java.util.Collection; @@ -46,6 +45,7 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static org.apache.flink.streaming.api.TimeCharacteristic.EventTime; import static org.apache.flink.streaming.api.TimeCharacteristic.IngestionTime; @@ -57,9 +57,8 @@ import static org.assertj.core.api.Assertions.fail; /** Simple End to End Test for Kafka. */ -public class KafkaShuffleITCase extends KafkaShuffleTestBase { - - @Rule public final Timeout timeout = Timeout.millis(600000L); +@Timeout(value = 600000L, unit = TimeUnit.MILLISECONDS) +class KafkaShuffleITCase extends KafkaShuffleTestBase { /** * To test no data is lost or duplicated end-2-end with the default time characteristic: * ProcessingTime. * @@ -68,7 +67,7 @@ public class KafkaShuffleITCase extends KafkaShuffleTestBase { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSimpleProcessingTime() throws Exception { + void testSimpleProcessingTime() throws Exception { testKafkaShuffle(200000, ProcessingTime); } @@ -78,7 +77,7 @@ public void testSimpleProcessingTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSimpleIngestionTime() throws Exception { + void testSimpleIngestionTime() throws Exception { testKafkaShuffle(200000, IngestionTime); } @@ -88,7 +87,7 @@ public void testSimpleIngestionTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSimpleEventTime() throws Exception { + void testSimpleEventTime() throws Exception { testKafkaShuffle(100000, EventTime); } @@ -98,7 +97,7 @@ public void testSimpleEventTime() throws Exception { *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionProcessingTime() throws Exception { + void testAssignedToPartitionProcessingTime() throws Exception { testAssignedToPartition(300000, ProcessingTime); } @@ -108,7 +107,7 @@ public void testAssignedToPartitionProcessingTime() throws Exception { *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionIngestionTime() throws Exception { + void testAssignedToPartitionIngestionTime() throws Exception { testAssignedToPartition(300000, IngestionTime); } @@ -118,7 +117,7 @@ public void testAssignedToPartitionIngestionTime() throws Exception { *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testAssignedToPartitionEventTime() throws Exception { + void testAssignedToPartitionEventTime() throws Exception { testAssignedToPartition(100000, EventTime); } @@ -128,7 +127,7 @@ public void testAssignedToPartitionEventTime() throws Exception { *

Producer Parallelism = 2; Kafka Partition # = 3; Consumer Parallelism = 3. */ @Test - public void testWatermarkIncremental() throws Exception { + void testWatermarkIncremental() throws Exception { testWatermarkIncremental(100000); } @@ -138,7 +137,7 @@ public void testWatermarkIncremental() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSerDeProcessingTime() throws Exception { + void testSerDeProcessingTime() throws Exception { testRecordSerDe(ProcessingTime); } @@ -149,7 +148,7 @@ public void testSerDeProcessingTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSerDeIngestionTime() throws Exception { + void testSerDeIngestionTime() throws Exception { testRecordSerDe(IngestionTime); } @@ -160,7 +159,7 @@ public void testSerDeIngestionTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testSerDeEventTime() throws Exception { + void testSerDeEventTime() throws Exception { testRecordSerDe(EventTime); } @@ -171,7 +170,7 @@ public void testSerDeEventTime() throws Exception { *

Producer Parallelism = 1; Kafka Partition # = 1; Consumer Parallelism = 1. */ @Test - public void testWatermarkBroadcasting() throws Exception { + void testWatermarkBroadcasting() throws Exception { final int numberOfPartitions = 3; final int producerParallelism = 2; final int numElementsPerProducer = 1000; diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleTestBase.java index 064aebd7f..9eed87b0b 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleTestBase.java @@ -40,18 +40,18 @@ import org.apache.flink.test.util.SuccessException; import org.apache.flink.util.Collector; -import org.junit.BeforeClass; +import org.junit.jupiter.api.BeforeAll; import java.util.Random; import static org.apache.flink.streaming.api.TimeCharacteristic.EventTime; /** Base Test Class for KafkaShuffle. */ -public class KafkaShuffleTestBase extends KafkaConsumerTestBase { +class KafkaShuffleTestBase extends KafkaConsumerTestBase { static final long INIT_TIMESTAMP = System.currentTimeMillis(); - @BeforeClass - public static void prepare() throws Exception { + @BeforeAll + protected static void prepare() throws Exception { KafkaProducerTestBase.prepare(); ((KafkaTestEnvironmentImpl) kafkaServer) .setProducerSemantic(FlinkKafkaProducer.Semantic.EXACTLY_ONCE); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaChangelogTableITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaChangelogTableITCase.java index e8bc9e373..cf54e00ef 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaChangelogTableITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaChangelogTableITCase.java @@ -32,8 +32,8 @@ import org.apache.flink.table.api.config.OptimizerConfigOptions; import org.apache.kafka.clients.producer.ProducerConfig; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.ZoneId; @@ -45,17 +45,17 @@ import static org.apache.flink.streaming.connectors.kafka.table.KafkaTableTestUtils.waitingExpectedResults; /** IT cases for Kafka with changelog format for Table API & SQL. 
*/ -public class KafkaChangelogTableITCase extends KafkaTableTestBase { +class KafkaChangelogTableITCase extends KafkaTableTestBase { - @Before - public void before() { + @BeforeEach + void before() { // we have to use single parallelism, // because we will count the messages in sink to terminate the job env.setParallelism(1); } @Test - public void testKafkaDebeziumChangelogSource() throws Exception { + void testKafkaDebeziumChangelogSource() throws Exception { final String topic = "changelog_topic"; createTestTopic(topic, 1, 1); @@ -182,7 +182,7 @@ public void testKafkaDebeziumChangelogSource() throws Exception { } @Test - public void testKafkaCanalChangelogSource() throws Exception { + void testKafkaCanalChangelogSource() throws Exception { final String topic = "changelog_canal"; createTestTopic(topic, 1, 1); @@ -323,7 +323,7 @@ public void testKafkaCanalChangelogSource() throws Exception { } @Test - public void testKafkaMaxwellChangelogSource() throws Exception { + void testKafkaMaxwellChangelogSource() throws Exception { final String topic = "changelog_maxwell"; createTestTopic(topic, 1, 1); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java index e9959087c..182425c65 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java @@ -23,7 +23,7 @@ import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.types.DataType; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.HashMap; import java.util.Map; @@ -38,10 +38,10 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test for {@link KafkaConnectorOptionsUtil}. 
*/ -public class KafkaConnectorOptionsUtilTest { +class KafkaConnectorOptionsUtilTest { @Test - public void testFormatProjection() { + void testFormatProjection() { final DataType dataType = DataTypes.ROW( FIELD("id", INT()), @@ -60,7 +60,7 @@ public void testFormatProjection() { } @Test - public void testMissingKeyFormatProjection() { + void testMissingKeyFormatProjection() { final DataType dataType = ROW(FIELD("id", INT())); final Map options = createTestOptions(); @@ -74,7 +74,7 @@ public void testMissingKeyFormatProjection() { } @Test - public void testInvalidKeyFormatFieldProjection() { + void testInvalidKeyFormatFieldProjection() { final DataType dataType = ROW(FIELD("id", INT()), FIELD("name", STRING())); final Map options = createTestOptions(); options.put("key.fields", "non_existing"); @@ -92,7 +92,7 @@ public void testInvalidKeyFormatFieldProjection() { } @Test - public void testInvalidKeyFormatPrefixProjection() { + void testInvalidKeyFormatPrefixProjection() { final DataType dataType = ROW(FIELD("k_part_1", INT()), FIELD("part_2", STRING()), FIELD("name", STRING())); final Map options = createTestOptions(); @@ -109,7 +109,7 @@ public void testInvalidKeyFormatPrefixProjection() { } @Test - public void testInvalidValueFormatProjection() { + void testInvalidValueFormatProjection() { final DataType dataType = ROW(FIELD("k_id", INT()), FIELD("id", STRING())); final Map options = createTestOptions(); options.put("key.fields", "k_id"); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java index 1246d53a3..23a239766 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java @@ -77,7 +77,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.common.TopicPartition; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.NullSource; @@ -187,7 +187,7 @@ public class KafkaDynamicTableFactoryTest { private static final DataType SCHEMA_DATA_TYPE = SCHEMA.toPhysicalRowDataType(); @Test - public void testTableSource() { + void testTableSource() { final DynamicTableSource actualSource = createTableSource(SCHEMA, getBasicSourceOptions()); final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource; @@ -221,7 +221,7 @@ public void testTableSource() { } @Test - public void testTableSourceWithPattern() { + void testTableSourceWithPattern() { final Map modifiedOptions = getModifiedOptions( getBasicSourceOptions(), @@ -265,7 +265,7 @@ public void testTableSourceWithPattern() { } @Test - public void testTableSourceWithKeyValue() { + void testTableSourceWithKeyValue() { final DynamicTableSource actualSource = createTableSource(SCHEMA, getKeyValueOptions()); final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource; // initialize stateful testing formats @@ -301,7 +301,7 @@ public void testTableSourceWithKeyValue() { } @Test - public void testTableSourceWithKeyValueAndMetadata() { + void testTableSourceWithKeyValueAndMetadata() { final 
Map options = getKeyValueOptions(); options.put("value.test-format.readable-metadata", "metadata_1:INT, metadata_2:STRING"); @@ -354,7 +354,7 @@ public void testTableSourceWithKeyValueAndMetadata() { } @Test - public void testTableSourceCommitOnCheckpointDisabled() { + void testTableSourceCommitOnCheckpointDisabled() { final Map modifiedOptions = getModifiedOptions( getBasicSourceOptions(), options -> options.remove("properties.group.id")); @@ -387,7 +387,7 @@ public void testTableSourceSetOffsetReset(final String strategyName) { } @Test - public void testTableSourceSetOffsetResetWithException() { + void testTableSourceSetOffsetResetWithException() { String errorStrategy = "errorStrategy"; assertThatThrownBy(() -> testTableSourceSetOffsetReset(errorStrategy)) .isInstanceOf(IllegalArgumentException.class) @@ -430,7 +430,7 @@ private void testSetOffsetResetForStartFromGroupOffsets(String value) { } @Test - public void testBoundedSpecificOffsetsValidate() { + void testBoundedSpecificOffsetsValidate() { final Map modifiedOptions = getModifiedOptions( getBasicSourceOptions(), @@ -446,7 +446,7 @@ public void testBoundedSpecificOffsetsValidate() { } @Test - public void testBoundedSpecificOffsets() { + void testBoundedSpecificOffsets() { testBoundedOffsets( "specific-offsets", options -> { @@ -468,7 +468,7 @@ public void testBoundedSpecificOffsets() { } @Test - public void testBoundedLatestOffset() { + void testBoundedLatestOffset() { testBoundedOffsets( "latest-offset", options -> {}, @@ -492,7 +492,7 @@ public void testBoundedLatestOffset() { } @Test - public void testBoundedGroupOffsets() { + void testBoundedGroupOffsets() { testBoundedOffsets( "group-offsets", options -> {}, @@ -512,7 +512,7 @@ public void testBoundedGroupOffsets() { } @Test - public void testBoundedTimestamp() { + void testBoundedTimestamp() { testBoundedOffsets( "timestamp", options -> { @@ -579,7 +579,7 @@ private void testBoundedOffsets( } @Test - public void testTableSink() { + void testTableSink() { final Map modifiedOptions = getModifiedOptions( getBasicSinkOptions(), @@ -619,7 +619,7 @@ public void testTableSink() { } @Test - public void testTableSinkSemanticTranslation() { + void testTableSinkSemanticTranslation() { final List semantics = Arrays.asList("exactly-once", "at-least-once", "none"); final EncodingFormat> valueEncodingFormat = new EncodingFormatMock(","); @@ -651,7 +651,7 @@ public void testTableSinkSemanticTranslation() { } @Test - public void testTableSinkWithKeyValue() { + void testTableSinkWithKeyValue() { final Map modifiedOptions = getModifiedOptions( getKeyValueOptions(), @@ -694,7 +694,7 @@ public void testTableSinkWithKeyValue() { } @Test - public void testTableSinkWithParallelism() { + void testTableSinkWithParallelism() { final Map modifiedOptions = getModifiedOptions( getBasicSinkOptions(), options -> options.put("sink.parallelism", "100")); @@ -728,7 +728,7 @@ public void testTableSinkWithParallelism() { } @Test - public void testTableSinkAutoCompleteSchemaRegistrySubject() { + void testTableSinkAutoCompleteSchemaRegistrySubject() { // only format verifyEncoderSubject( options -> { @@ -876,7 +876,7 @@ private SerializationSchema createDebeziumAvroSerSchema( // -------------------------------------------------------------------------------------------- @Test - public void testSourceTableWithTopicAndTopicPattern() { + void testSourceTableWithTopicAndTopicPattern() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -897,7 +897,7 @@ public void testSourceTableWithTopicAndTopicPattern() { } 
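The negative-path tests in this factory test, like the queue tests earlier in the patch, replace the JUnit 4 try/fail/catch idiom and the ExpectedException rule with AssertJ's assertThatThrownBy. A minimal, self-contained sketch of that pattern, assuming a hypothetical validate() helper that throws ValidationException (the class and method names here are illustrative and not taken from the patch):

import org.apache.flink.table.api.ValidationException;

import org.junit.jupiter.api.Test;

import static org.assertj.core.api.Assertions.assertThatThrownBy;

/** Sketch of the JUnit 5 + AssertJ exception-assertion style used throughout this patch. */
class ThrownByPatternSketchTest {

    // Hypothetical stand-in for calls such as createTableSource/createTableSink.
    private static void validate() {
        throw new ValidationException("Invalid option combination.");
    }

    @Test
    void testValidationFailure() {
        // One expression replaces try { validate(); fail(...); } catch (ValidationException ignored) {}.
        assertThatThrownBy(ThrownByPatternSketchTest::validate)
                .isInstanceOf(ValidationException.class)
                .hasMessageContaining("Invalid option");
    }
}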
@Test - public void testMissingStartupTimestamp() { + void testMissingStartupTimestamp() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -917,7 +917,7 @@ public void testMissingStartupTimestamp() { } @Test - public void testMissingSpecificOffsets() { + void testMissingSpecificOffsets() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -938,7 +938,7 @@ public void testMissingSpecificOffsets() { } @Test - public void testInvalidSinkPartitioner() { + void testInvalidSinkPartitioner() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -956,7 +956,7 @@ public void testInvalidSinkPartitioner() { } @Test - public void testInvalidRoundRobinPartitionerWithKeyFields() { + void testInvalidRoundRobinPartitionerWithKeyFields() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -976,7 +976,7 @@ public void testInvalidRoundRobinPartitionerWithKeyFields() { } @Test - public void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { + void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { assertThatThrownBy( () -> { final Map modifiedOptions = @@ -1002,7 +1002,7 @@ public void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { } @Test - public void testSinkWithTopicListOrTopicPattern() { + void testSinkWithTopicListOrTopicPattern() { Map modifiedOptions = getModifiedOptions( getBasicSinkOptions(), @@ -1039,7 +1039,7 @@ public void testSinkWithTopicListOrTopicPattern() { } @Test - public void testPrimaryKeyValidation() { + void testPrimaryKeyValidation() { final ResolvedSchema pkSchema = new ResolvedSchema( SCHEMA.getColumns(), @@ -1098,7 +1098,7 @@ public void testPrimaryKeyValidation() { } @Test - public void testDiscoverPartitionByDefault() { + void testDiscoverPartitionByDefault() { Map tableSourceOptions = getModifiedOptions( getBasicSourceOptions(), @@ -1136,7 +1136,7 @@ public void testDiscoverPartitionByDefault() { } @Test - public void testDisableDiscoverPartition() { + void testDisableDiscoverPartition() { Map tableSourceOptions = getModifiedOptions( getBasicSourceOptions(), diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java index 409acd977..a28d44af6 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java @@ -35,16 +35,18 @@ import org.apache.flink.table.data.RowData; import org.apache.flink.table.utils.EncodingUtils; import org.apache.flink.test.util.SuccessException; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameter; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; import org.apache.flink.types.Row; import org.apache.kafka.clients.consumer.NoOffsetForPartitionException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; import org.assertj.core.api.Assertions; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.time.Duration; import java.time.Instant; @@ -76,21 +78,21 @@ 
import static org.assertj.core.api.HamcrestCondition.matching; /** Basic IT cases for the Kafka table source and sink. */ -@RunWith(Parameterized.class) -public class KafkaTableITCase extends KafkaTableTestBase { +@ExtendWith(ParameterizedTestExtension.class) +class KafkaTableITCase extends KafkaTableTestBase { private static final String JSON_FORMAT = "json"; private static final String AVRO_FORMAT = "avro"; private static final String CSV_FORMAT = "csv"; - @Parameterized.Parameter public String format; + @Parameter public String format; - @Parameterized.Parameters(name = "format = {0}") + @Parameters(name = "format = {0}") public static Collection parameters() { return Arrays.asList(JSON_FORMAT, AVRO_FORMAT, CSV_FORMAT); } - @Before + @BeforeEach public void before() { // we have to use single parallelism, // because we will count the messages in sink to terminate the job @@ -98,7 +100,7 @@ public void before() { } @Test - public void testKafkaSourceSink() throws Exception { + void testKafkaSourceSink() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "tstopic_" + format + "_" + UUID.randomUUID(); @@ -189,7 +191,7 @@ public void testKafkaSourceSink() throws Exception { } @Test - public void testKafkaSourceSinkWithBoundedSpecificOffsets() throws Exception { + void testKafkaSourceSinkWithBoundedSpecificOffsets() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "bounded_" + format + "_" + UUID.randomUUID(); @@ -243,7 +245,7 @@ public void testKafkaSourceSinkWithBoundedSpecificOffsets() throws Exception { } @Test - public void testKafkaSourceSinkWithBoundedTimestamp() throws Exception { + void testKafkaSourceSinkWithBoundedTimestamp() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "bounded_" + format + "_" + UUID.randomUUID(); @@ -300,7 +302,7 @@ public void testKafkaSourceSinkWithBoundedTimestamp() throws Exception { } @Test - public void testKafkaTableWithMultipleTopics() throws Exception { + void testKafkaTableWithMultipleTopics() throws Exception { // ---------- create source and sink tables ------------------- String tableTemp = "create table %s (\n" @@ -393,7 +395,7 @@ public void testKafkaTableWithMultipleTopics() throws Exception { } @Test - public void testKafkaSourceSinkWithMetadata() throws Exception { + void testKafkaSourceSinkWithMetadata() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "metadata_topic_" + format + "_" + UUID.randomUUID(); @@ -485,7 +487,7 @@ public void testKafkaSourceSinkWithMetadata() throws Exception { } @Test - public void testKafkaSourceSinkWithKeyAndPartialValue() throws Exception { + void testKafkaSourceSinkWithKeyAndPartialValue() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. 
final String topic = "key_partial_value_topic_" + format + "_" + UUID.randomUUID(); @@ -566,7 +568,7 @@ public void testKafkaSourceSinkWithKeyAndPartialValue() throws Exception { } @Test - public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { + void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "key_full_value_topic_" + format + "_" + UUID.randomUUID(); @@ -644,7 +646,7 @@ public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { } @Test - public void testKafkaTemporalJoinChangelog() throws Exception { + void testKafkaTemporalJoinChangelog() throws Exception { // Set the session time zone to UTC, because the next `METADATA FROM // 'value.source.timestamp'` DDL // will use the session time zone when convert the changelog time from milliseconds to @@ -787,7 +789,7 @@ private void initialProductChangelog(String topic, String bootstraps) throws Exc } @Test - public void testPerPartitionWatermarkKafka() throws Exception { + void testPerPartitionWatermarkKafka() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "per_partition_watermark_topic_" + format + "_" + UUID.randomUUID(); @@ -877,7 +879,7 @@ public void testPerPartitionWatermarkKafka() throws Exception { } @Test - public void testPerPartitionWatermarkWithIdleSource() throws Exception { + void testPerPartitionWatermarkWithIdleSource() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "idle_partition_watermark_topic_" + format + "_" + UUID.randomUUID(); @@ -952,7 +954,7 @@ public void testPerPartitionWatermarkWithIdleSource() throws Exception { } @Test - public void testLatestOffsetStrategyResume() throws Exception { + void testLatestOffsetStrategyResume() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. 
final String topic = "latest_offset_resume_topic_" + format + "_" + UUID.randomUUID(); @@ -1084,17 +1086,17 @@ public void testLatestOffsetStrategyResume() throws Exception { } @Test - public void testStartFromGroupOffsetsLatest() throws Exception { + void testStartFromGroupOffsetsLatest() throws Exception { testStartFromGroupOffsets("latest"); } @Test - public void testStartFromGroupOffsetsEarliest() throws Exception { + void testStartFromGroupOffsetsEarliest() throws Exception { testStartFromGroupOffsets("earliest"); } @Test - public void testStartFromGroupOffsetsNone() { + void testStartFromGroupOffsetsNone() { Assertions.assertThatThrownBy(() -> testStartFromGroupOffsetsWithNoneResetStrategy()) .satisfies(FlinkAssertions.anyCauseMatches(NoOffsetForPartitionException.class)); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestBase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestBase.java index cffe2d6c0..20773460b 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestBase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestBase.java @@ -35,13 +35,13 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.StringDeserializer; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.containers.KafkaContainer; import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.junit.jupiter.Container; import org.testcontainers.utility.DockerImageName; import java.time.Duration; @@ -63,7 +63,7 @@ public abstract class KafkaTableTestBase extends AbstractTestBase { private static final String INTER_CONTAINER_KAFKA_ALIAS = "kafka"; private static final int zkTimeoutMills = 30000; - @ClassRule + @Container public static final KafkaContainer KAFKA_CONTAINER = new KafkaContainer(DockerImageName.parse(DockerImageVersions.KAFKA)) { @Override @@ -87,8 +87,8 @@ protected void doStart() { // Timer for scheduling logging task if the test hangs private final Timer loggingTimer = new Timer("Debug Logging Timer"); - @Before - public void setup() { + @BeforeEach + void setup() { env = StreamExecutionEnvironment.getExecutionEnvironment(); tEnv = StreamTableEnvironment.create(env); env.getConfig().setRestartStrategy(RestartStrategies.noRestart()); @@ -107,8 +107,8 @@ public void setup() { }); } - @After - public void after() { + @AfterEach + void after() { // Cancel timer for debug logging cancelTimeoutLogger(); } diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java index 0640b9ad7..f83111a05 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java @@ -32,10 +32,11 @@ import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext; import 
org.apache.flink.table.runtime.typeutils.InternalTypeInfo; import org.apache.flink.table.runtime.typeutils.RowDataSerializer; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.io.IOException; import java.time.Instant; @@ -55,9 +56,9 @@ import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link ReducingUpsertWriter}. */ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class ReducingUpsertWriterTest { - @Parameterized.Parameters(name = "object reuse = {0}") + @Parameters(name = "object reuse = {0}") public static Object[] enableObjectReuse() { return new Boolean[] {true, false}; } @@ -150,7 +151,7 @@ public ReducingUpsertWriterTest(boolean enableObjectReuse) { } @Test - public void testWriteData() throws Exception { + void testWriteData() throws Exception { final MockedSinkWriter writer = new MockedSinkWriter(); final ReducingUpsertWriter bufferedWriter = createBufferedWriter(writer); @@ -217,7 +218,7 @@ public void testWriteData() throws Exception { } @Test - public void testFlushDataWhenCheckpointing() throws Exception { + void testFlushDataWhenCheckpointing() throws Exception { final MockedSinkWriter writer = new MockedSinkWriter(); final ReducingUpsertWriter bufferedWriter = createBufferedWriter(writer); // write all data, there should be 3 records are still buffered @@ -265,7 +266,7 @@ public void testFlushDataWhenCheckpointing() throws Exception { } @Test - public void testWriteDataWithNullTimestamp() throws Exception { + void testWriteDataWithNullTimestamp() throws Exception { final MockedSinkWriter writer = new MockedSinkWriter(); final ReducingUpsertWriter bufferedWriter = createBufferedWriter(writer); diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java index 15c740d21..8d070d0d3 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java @@ -66,13 +66,12 @@ import org.apache.flink.table.types.DataType; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.table.types.logical.VarCharType; -import org.apache.flink.util.TestLogger; +import org.apache.flink.util.TestLoggerExtension; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.common.TopicPartition; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.util.Arrays; import java.util.Collections; @@ -82,7 +81,7 @@ import java.util.Properties; import java.util.function.Consumer; -import static org.apache.flink.core.testutils.FlinkMatchers.containsCause; +import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; import static org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ScanBoundedMode; import static 
org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.AVRO_CONFLUENT; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink; @@ -91,7 +90,8 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test for {@link UpsertKafkaDynamicTableFactory}. */ -public class UpsertKafkaDynamicTableFactoryTest extends TestLogger { +@ExtendWith(TestLoggerExtension.class) +class UpsertKafkaDynamicTableFactoryTest { private static final String SOURCE_TOPIC = "sourceTopic_1"; @@ -148,10 +148,8 @@ public class UpsertKafkaDynamicTableFactoryTest extends TestLogger { new TestFormatFactory.DecodingFormatMock( ",", true, ChangelogMode.insertOnly(), Collections.emptyMap()); - @Rule public ExpectedException thrown = ExpectedException.none(); - @Test - public void testTableSource() { + void testTableSource() { final DataType producedDataType = SOURCE_SCHEMA.toPhysicalRowDataType(); // Construct table source using options and table source factory final DynamicTableSource actualSource = @@ -176,7 +174,7 @@ public void testTableSource() { } @Test - public void testTableSink() { + void testTableSink() { // Construct table sink using options and table sink factory. final Map modifiedOptions = getModifiedOptions( @@ -217,7 +215,7 @@ public void testTableSink() { @SuppressWarnings("rawtypes") @Test - public void testBufferedTableSink() { + void testBufferedTableSink() { // Construct table sink using options and table sink factory. final DynamicTableSink actualSink = createTableSink( @@ -274,7 +272,7 @@ public void testBufferedTableSink() { } @Test - public void testTableSinkWithParallelism() { + void testTableSinkWithParallelism() { final Map modifiedOptions = getModifiedOptions( getFullSinkOptions(), @@ -310,7 +308,7 @@ public void testTableSinkWithParallelism() { } @Test - public void testTableSinkAutoCompleteSchemaRegistrySubject() { + void testTableSinkAutoCompleteSchemaRegistrySubject() { // value.format + key.format verifyEncoderSubject( options -> { @@ -420,7 +418,7 @@ private SerializationSchema createConfluentAvroSerSchema( // -------------------------------------------------------------------------------------------- @Test - public void testBoundedSpecificOffsetsValidate() { + void testBoundedSpecificOffsetsValidate() { final Map options = getFullSourceOptions(); options.put( KafkaConnectorOptions.SCAN_BOUNDED_MODE.key(), @@ -434,12 +432,10 @@ public void testBoundedSpecificOffsetsValidate() { } @Test - public void testBoundedSpecificOffsets() { + void testBoundedSpecificOffsets() { testBoundedOffsets( ScanBoundedMode.SPECIFIC_OFFSETS, - options -> { - options.put("scan.bounded.specific-offsets", "partition:0,offset:2"); - }, + options -> options.put("scan.bounded.specific-offsets", "partition:0,offset:2"), source -> { assertThat(source.getBoundedness()).isEqualTo(Boundedness.BOUNDED); OffsetsInitializer offsetsInitializer = @@ -456,7 +452,7 @@ public void testBoundedSpecificOffsets() { } @Test - public void testBoundedLatestOffset() { + void testBoundedLatestOffset() { testBoundedOffsets( ScanBoundedMode.LATEST_OFFSET, options -> {}, @@ -480,12 +476,10 @@ public void testBoundedLatestOffset() { } @Test - public void testBoundedGroupOffsets() { + void testBoundedGroupOffsets() { testBoundedOffsets( ScanBoundedMode.GROUP_OFFSETS, - options -> { - options.put("properties.group.id", "dummy"); - }, + options -> options.put("properties.group.id", "dummy"), source -> { assertThat(source.getBoundedness()).isEqualTo(Boundedness.BOUNDED); 
OffsetsInitializer offsetsInitializer = @@ -502,12 +496,10 @@ public void testBoundedGroupOffsets() { } @Test - public void testBoundedTimestamp() { + void testBoundedTimestamp() { testBoundedOffsets( ScanBoundedMode.TIMESTAMP, - options -> { - options.put("scan.bounded.timestamp-millis", "1"); - }, + options -> options.put("scan.bounded.timestamp-millis", "1"), source -> { assertThat(source.getBoundedness()).isEqualTo(Boundedness.BOUNDED); OffsetsInitializer offsetsInitializer = @@ -552,117 +544,101 @@ public void testBoundedTimestamp() { // -------------------------------------------------------------------------------------------- @Test - public void testCreateSourceTableWithoutPK() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( - "'upsert-kafka' tables require to define a PRIMARY KEY constraint. " - + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " - + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys."))); - + void testCreateSourceTableWithoutPK() { ResolvedSchema illegalSchema = ResolvedSchema.of( Column.physical("window_start", DataTypes.STRING()), Column.physical("region", DataTypes.STRING()), Column.physical("view_count", DataTypes.BIGINT())); - createTableSource(illegalSchema, getFullSourceOptions()); + assertThatThrownBy(() -> createTableSource(illegalSchema, getFullSourceOptions())) + .satisfies(anyCauseMatches(ValidationException.class, + "'upsert-kafka' tables require to define a PRIMARY KEY constraint. " + + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " + + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys." + )); } @Test - public void testCreateSinkTableWithoutPK() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( - "'upsert-kafka' tables require to define a PRIMARY KEY constraint. " - + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " - + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys."))); - + void testCreateSinkTableWithoutPK() { ResolvedSchema illegalSchema = ResolvedSchema.of( Column.physical("region", DataTypes.STRING()), Column.physical("view_count", DataTypes.BIGINT())); - createTableSink(illegalSchema, getFullSinkOptions()); + assertThatThrownBy(() -> createTableSink(illegalSchema, getFullSinkOptions())) + .satisfies(anyCauseMatches(ValidationException.class, + "'upsert-kafka' tables require to define a PRIMARY KEY constraint. " + + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " + + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys." 
+ )); } @Test - public void testSerWithCDCFormatAsValue() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( + void testSerWithCDCFormatAsValue() { + assertThatThrownBy(() -> + createTableSink( + SINK_SCHEMA, + getModifiedOptions( + getFullSinkOptions(), + options -> + options.put( + String.format( + "value.%s.%s", + TestFormatFactory.IDENTIFIER, + TestFormatFactory.CHANGELOG_MODE.key()), + "I;UA;UB;D")))) + .satisfies(anyCauseMatches(ValidationException.class, String.format( "'upsert-kafka' connector doesn't support '%s' as value format, " + "because '%s' is not in insert-only mode.", TestFormatFactory.IDENTIFIER, - TestFormatFactory.IDENTIFIER)))); + TestFormatFactory.IDENTIFIER)) + ); - createTableSink( - SINK_SCHEMA, - getModifiedOptions( - getFullSinkOptions(), - options -> - options.put( - String.format( - "value.%s.%s", - TestFormatFactory.IDENTIFIER, - TestFormatFactory.CHANGELOG_MODE.key()), - "I;UA;UB;D"))); } @Test - public void testDeserWithCDCFormatAsValue() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( + void testDeserWithCDCFormatAsValue() { + assertThatThrownBy(() -> + createTableSource( + SOURCE_SCHEMA, + getModifiedOptions( + getFullSourceOptions(), + options -> + options.put( + String.format( + "value.%s.%s", + TestFormatFactory.IDENTIFIER, + TestFormatFactory.CHANGELOG_MODE.key()), + "I;UA;UB;D")))) + .satisfies(anyCauseMatches(ValidationException.class, String.format( "'upsert-kafka' connector doesn't support '%s' as value format, " + "because '%s' is not in insert-only mode.", TestFormatFactory.IDENTIFIER, - TestFormatFactory.IDENTIFIER)))); - - createTableSource( - SOURCE_SCHEMA, - getModifiedOptions( - getFullSourceOptions(), - options -> - options.put( - String.format( - "value.%s.%s", - TestFormatFactory.IDENTIFIER, - TestFormatFactory.CHANGELOG_MODE.key()), - "I;UA;UB;D"))); + TestFormatFactory.IDENTIFIER)) + ); } @Test - public void testInvalidSinkBufferFlush() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( + void testInvalidSinkBufferFlush() { + assertThatThrownBy(() -> + createTableSink( + SINK_SCHEMA, + getModifiedOptions( + getFullSinkOptions(), + options -> { + options.put("sink.buffer-flush.max-rows", "0"); + options.put("sink.buffer-flush.interval", "1s"); + }))) + .satisfies(anyCauseMatches(ValidationException.class, "'sink.buffer-flush.max-rows' and 'sink.buffer-flush.interval' " + "must be set to be greater than zero together to enable" - + " sink buffer flushing."))); - createTableSink( - SINK_SCHEMA, - getModifiedOptions( - getFullSinkOptions(), - options -> { - options.put("sink.buffer-flush.max-rows", "0"); - options.put("sink.buffer-flush.interval", "1s"); - })); + + " sink buffer flushing." 
+ )); } @Test - public void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { - thrown.expect(ValidationException.class); - thrown.expect( - containsCause( - new ValidationException( - "sink.transactional-id-prefix must be specified when using DeliveryGuarantee.EXACTLY_ONCE."))); - + void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { final Map modifiedOptions = getModifiedOptions( getFullSinkOptions(), @@ -672,7 +648,10 @@ public void testExactlyOnceGuaranteeWithoutTransactionalIdPrefix() { KafkaConnectorOptions.DELIVERY_GUARANTEE.key(), DeliveryGuarantee.EXACTLY_ONCE.toString()); }); - createTableSink(SINK_SCHEMA, modifiedOptions); + assertThatThrownBy(() -> createTableSink(SINK_SCHEMA, modifiedOptions)) + .satisfies(anyCauseMatches(ValidationException.class, + "sink.transactional-id-prefix must be specified when using DeliveryGuarantee.EXACTLY_ONCE." + )); } // -------------------------------------------------------------------------------------------- diff --git a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java index 1a6bf7e13..57c85790f 100644 --- a/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java +++ b/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java @@ -23,13 +23,13 @@ import org.apache.flink.table.api.Table; import org.apache.flink.table.api.TableResult; import org.apache.flink.table.planner.factories.TestValuesTableFactory; -import org.apache.flink.table.utils.LegacyRowResource; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameter; +import org.apache.flink.testutils.junit.extensions.parameterized.ParameterizedTestExtension; +import org.apache.flink.testutils.junit.extensions.parameterized.Parameters; import org.apache.flink.types.Row; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.time.Duration; import java.time.LocalDateTime; @@ -56,27 +56,25 @@ import static org.assertj.core.api.HamcrestCondition.matching; /** Upsert-kafka IT cases. 
*/ -@RunWith(Parameterized.class) +@ExtendWith(ParameterizedTestExtension.class) public class UpsertKafkaTableITCase extends KafkaTableTestBase { private static final String JSON_FORMAT = "json"; private static final String CSV_FORMAT = "csv"; private static final String AVRO_FORMAT = "avro"; - @Parameterized.Parameter public String format; + @Parameter public String format; - @Parameterized.Parameters(name = "format = {0}") + @Parameters(name = "format = {0}") public static Object[] parameters() { return new Object[] {JSON_FORMAT, CSV_FORMAT, AVRO_FORMAT}; } - @Rule public final LegacyRowResource usesLegacyRows = LegacyRowResource.INSTANCE; - private static final String USERS_TOPIC = "users"; private static final String WORD_COUNT_TOPIC = "word_count"; @Test - public void testAggregate() throws Exception { + void testAggregate() throws Exception { String topic = WORD_COUNT_TOPIC + "_" + format; createTestTopic(topic, 4, 1); // ------------- test --------------- @@ -87,7 +85,7 @@ public void testAggregate() throws Exception { } @Test - public void testTemporalJoin() throws Exception { + void testTemporalJoin() throws Exception { String topic = USERS_TOPIC + "_" + format; createTestTopic(topic, 2, 1); // ------------- test --------------- @@ -110,7 +108,7 @@ public void testTemporalJoin() throws Exception { } @Test - public void testBufferedUpsertSink() throws Exception { + void testBufferedUpsertSink() throws Exception { final String topic = "buffered_upsert_topic_" + format; createTestTopic(topic, 1, 1); String bootstraps = getBootstrapServers(); @@ -199,7 +197,7 @@ public void testBufferedUpsertSink() throws Exception { } @Test - public void testBufferedUpsertSinkWithoutAssigningWatermark() throws Exception { + void testBufferedUpsertSinkWithoutAssigningWatermark() throws Exception { final String topic = "buffered_upsert_topic_without_assigning_watermark_" + format; createTestTopic(topic, 1, 1); String bootstraps = getBootstrapServers(); @@ -264,7 +262,7 @@ public void testBufferedUpsertSinkWithoutAssigningWatermark() throws Exception { } @Test - public void testSourceSinkWithKeyAndPartialValue() throws Exception { + void testSourceSinkWithKeyAndPartialValue() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. final String topic = "key_partial_value_topic_" + format; @@ -362,7 +360,7 @@ public void testSourceSinkWithKeyAndPartialValue() throws Exception { } @Test - public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { + void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { // we always use a different topic name for each parameterized topic, // in order to make sure the topic can be created. 
final String topic = "key_full_value_topic_" + format; @@ -457,7 +455,7 @@ public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { } @Test - public void testUpsertKafkaSourceSinkWithBoundedSpecificOffsets() throws Exception { + void testUpsertKafkaSourceSinkWithBoundedSpecificOffsets() throws Exception { final String topic = "bounded_upsert_" + format + "_" + UUID.randomUUID(); createTestTopic(topic, 1, 1); @@ -510,7 +508,7 @@ public void testUpsertKafkaSourceSinkWithBoundedSpecificOffsets() throws Excepti } @Test - public void testUpsertKafkaSourceSinkWithBoundedTimestamp() throws Exception { + void testUpsertKafkaSourceSinkWithBoundedTimestamp() throws Exception { final String topic = "bounded_upsert_" + format + "_" + UUID.randomUUID(); createTestTopic(topic, 1, 1); @@ -596,7 +594,7 @@ public void testUpsertKafkaSourceSinkWithBoundedTimestamp() throws Exception { * results. */ @Test - public void testUpsertKafkaSourceSinkWithZeroLengthBoundedness() throws Exception { + void testUpsertKafkaSourceSinkWithZeroLengthBoundedness() throws Exception { final String topic = "bounded_upsert_" + format + "_" + UUID.randomUUID(); createTestTopic(topic, 1, 1);