Commit 39a1f2b

[FLINK-37644] Remove unused code and commons-collection
1 parent 35de2ce commit 39a1f2b

3 files changed: +0 -112 lines changed

flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java

Lines changed: 0 additions & 96 deletions

@@ -33,7 +33,6 @@
 import org.apache.flink.util.TestLogger;
 
 import com.google.common.base.MoreObjects;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.producer.Callback;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
@@ -48,17 +47,12 @@
 
 import javax.annotation.Nullable;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Properties;
-import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
-import static org.assertj.core.api.Assertions.fail;
-
 /**
  * The base for the Kafka tests. It brings up:
  *
@@ -278,96 +272,6 @@ public static <K, V> void produceToKafka(
         }
     }
 
-    /**
-     * We manually handle the timeout instead of using JUnit's timeout to return failure instead of
-     * timeout error. After timeout we assume that there are missing records and there is a bug, not
-     * that the test has run out of time.
-     */
-    public void assertAtLeastOnceForTopic(
-            Properties properties,
-            String topic,
-            int partition,
-            Set<Integer> expectedElements,
-            long timeoutMillis)
-            throws Exception {
-
-        long startMillis = System.currentTimeMillis();
-        Set<Integer> actualElements = new HashSet<>();
-
-        // until we timeout...
-        while (System.currentTimeMillis() < startMillis + timeoutMillis) {
-            properties.put(
-                    "key.deserializer",
-                    "org.apache.kafka.common.serialization.IntegerDeserializer");
-            properties.put(
-                    "value.deserializer",
-                    "org.apache.kafka.common.serialization.IntegerDeserializer");
-            // We need to set these two properties so that they are lower than request.timeout.ms.
-            // This is
-            // required for some old KafkaConsumer versions.
-            properties.put("session.timeout.ms", "2000");
-            properties.put("heartbeat.interval.ms", "500");
-
-            // query kafka for new records ...
-            Collection<ConsumerRecord<Integer, Integer>> records =
-                    kafkaServer.getAllRecordsFromTopic(properties, topic);
-
-            for (ConsumerRecord<Integer, Integer> record : records) {
-                actualElements.add(record.value());
-            }
-
-            // succeed if we got all expectedElements
-            if (actualElements.containsAll(expectedElements)) {
-                return;
-            }
-        }
-
-        fail(
-                String.format(
-                        "Expected to contain all of: <%s>, but was: <%s>",
-                        expectedElements, actualElements));
-    }
-
-    public void assertExactlyOnceForTopic(
-            Properties properties, String topic, List<Integer> expectedElements) {
-
-        List<Integer> actualElements = new ArrayList<>();
-
-        Properties consumerProperties = new Properties();
-        consumerProperties.putAll(properties);
-        consumerProperties.put(
-                "key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
-        consumerProperties.put(
-                "value.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
-        consumerProperties.put("isolation.level", "read_committed");
-
-        // query kafka for new records ...
-        Collection<ConsumerRecord<byte[], byte[]>> records =
-                kafkaServer.getAllRecordsFromTopic(consumerProperties, topic);
-
-        for (ConsumerRecord<byte[], byte[]> record : records) {
-            actualElements.add(ByteBuffer.wrap(record.value()).getInt());
-        }
-
-        // succeed if we got all expectedElements
-        if (actualElements.equals(expectedElements)) {
-            return;
-        }
-
-        fail(
-                String.format(
-                        "Expected %s, but was: %s",
-                        formatElements(expectedElements), formatElements(actualElements)));
-    }
-
-    private String formatElements(List<Integer> elements) {
-        if (elements.size() > 50) {
-            return String.format("number of elements: <%s>", elements.size());
-        } else {
-            return String.format("elements: <%s>", elements);
-        }
-    }
-
     public static void setNumKafkaClusters(int size) {
         numKafkaClusters = size;
     }
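
The Javadoc on the removed assertAtLeastOnceForTopic explains its design: the timeout is handled manually inside the polling loop so that a missing record surfaces as an assertion failure listing the elements, rather than as a JUnit timeout error. For reference, below is a minimal sketch of the same pattern written directly against the Kafka consumer API, since the kafkaServer.getAllRecordsFromTopic helper it relied on is also removed by this commit. The class name, method name, group.id and auto.offset.reset values are illustrative assumptions, and bootstrap.servers is assumed to arrive via the passed-in properties.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

import static org.assertj.core.api.Assertions.fail;

class AtLeastOnceAssertionSketch {

    /** Polls until the deadline; fails with the observed elements instead of a timeout error. */
    static void assertTopicContainsAtLeastOnce(
            Properties base, String topic, Set<Integer> expected, long timeoutMillis) {
        Properties props = new Properties();
        props.putAll(base); // bootstrap.servers is assumed to be set here
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.IntegerDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.IntegerDeserializer");
        props.put("group.id", "at-least-once-check");
        props.put("auto.offset.reset", "earliest");

        Set<Integer> actual = new HashSet<>();
        long deadline = System.currentTimeMillis() + timeoutMillis;
        try (KafkaConsumer<Integer, Integer> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic));
            // keep polling until we time out ...
            while (System.currentTimeMillis() < deadline) {
                for (ConsumerRecord<Integer, Integer> record : consumer.poll(Duration.ofMillis(200))) {
                    actual.add(record.value());
                }
                // succeed as soon as every expected element has been seen at least once
                if (actual.containsAll(expected)) {
                    return;
                }
            }
        }
        fail(String.format(
                "Expected to contain all of: <%s>, but was: <%s>", expected, actual));
    }
}

The removed assertExactlyOnceForTopic followed the same shape, but read byte[] records with isolation.level set to read_committed and compared the full list of values for equality instead of containment.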

flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironment.java

Lines changed: 0 additions & 6 deletions

@@ -17,9 +17,6 @@
 
 package org.apache.flink.streaming.connectors.kafka;
 
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-
-import java.util.Collection;
 import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
@@ -110,9 +107,6 @@ public Properties getTransactionalProducerConfig() {
         return props;
     }
 
-    public abstract <K, V> Collection<ConsumerRecord<K, V>> getAllRecordsFromTopic(
-            Properties properties, String topic);
-
     // -- offset handlers
 
     /** Simple interface to commit and retrieve offsets. */

flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java

Lines changed: 0 additions & 10 deletions

@@ -21,12 +21,10 @@
 import org.apache.flink.connector.kafka.testutils.KafkaUtil;
 import org.apache.flink.core.testutils.CommonTestUtils;
 
-import org.apache.commons.collections.list.UnmodifiableList;
 import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.admin.TopicListing;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
@@ -42,7 +40,6 @@
 
 import java.time.Duration;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -220,13 +217,6 @@ public String getVersion() {
         return DockerImageVersions.KAFKA;
     }
 
-    @Override
-    @SuppressWarnings("unchecked")
-    public <K, V> Collection<ConsumerRecord<K, V>> getAllRecordsFromTopic(
-            Properties properties, String topic) {
-        return UnmodifiableList.decorate(KafkaUtil.drainAllRecordsFromTopic(topic, properties));
-    }
-
     @Override
     public KafkaOffsetHandler createOffsetHandler() {
        return new KafkaOffsetHandlerImpl();
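
The deleted override is the only place in this diff that used commons-collections: it merely wrapped the drained records in UnmodifiableList.decorate before returning them. If an unmodifiable view is ever needed again, the JDK offers the same behaviour without the extra dependency. A minimal sketch, assuming KafkaUtil.drainAllRecordsFromTopic keeps the (topic, properties) signature shown in the removed code and returns a list of ConsumerRecord<byte[], byte[]> (the element type is an assumption; the class and method names here are illustrative):

import org.apache.flink.connector.kafka.testutils.KafkaUtil;

import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.util.Collections;
import java.util.List;
import java.util.Properties;

class DrainRecordsSketch {

    /** Same result as the removed override, using java.util.Collections instead of commons-collections. */
    static List<ConsumerRecord<byte[], byte[]>> allRecordsFromTopic(String topic, Properties properties) {
        // Collections.unmodifiableList replaces commons-collections' UnmodifiableList.decorate.
        List<ConsumerRecord<byte[], byte[]>> records =
                KafkaUtil.drainAllRecordsFromTopic(topic, properties);
        return Collections.unmodifiableList(records);
    }
}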
