Commit bf23e9c

Fix NPE if no offsets were found for specified timestamp
1 parent dd5389c commit bf23e9c

File tree

1 file changed (+43 −24 lines)

kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ConsumingService.java

Lines changed: 43 additions & 24 deletions
@@ -8,12 +8,14 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.utils.Bytes;
 import org.springframework.stereotype.Service;
@@ -101,8 +103,9 @@ private static class RecordEmitter {
 
     public void emit(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
       try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-        assignPartitions(consumer);
-        seekOffsets(consumer);
+        // assignPartitions(consumer);
+        // seekOffsets(consumer);
+        assignAndSeek(consumer);
         int pollsCount = 0;
         while (!sink.isCancelled() && ++pollsCount < MAX_POLLS_COUNT) {
           ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
@@ -131,39 +134,55 @@ private List<TopicPartition> getRequestedPartitions() {
           .collect(Collectors.toList());
     }
 
-    private void assignPartitions(KafkaConsumer<Bytes, Bytes> consumer) {
-      List<TopicPartition> partitions = getRequestedPartitions();
-
-      consumer.assign(partitions);
-    }
-
-    private void seekOffsets(KafkaConsumer<Bytes, Bytes> consumer) {
+    private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
       SeekType seekType = consumerPosition.getSeekType();
       switch (seekType) {
         case OFFSET:
-          consumerPosition.getSeekTo().forEach((partition, offset) -> {
-            TopicPartition topicPartition = new TopicPartition(topic, partition);
-            consumer.seek(topicPartition, offset);
-          });
+          assignAndSeekForOffset(consumer);
           break;
         case TIMESTAMP:
-          Map<TopicPartition, Long> timestampsToSearch = consumerPosition.getSeekTo().entrySet().stream()
-              .collect(Collectors.toMap(
-                  partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
-                  Map.Entry::getValue
-              ));
-          consumer.offsetsForTimes(timestampsToSearch)
-              .forEach((topicPartition, offsetAndTimestamp) ->
-                  consumer.seek(topicPartition, offsetAndTimestamp.offset())
-              );
+          assignAndSeekForTimestamp(consumer);
           break;
         case BEGINNING:
-          List<TopicPartition> partitions = getRequestedPartitions();
-          consumer.seekToBeginning(partitions);
+          assignAndSeekFromBeginning(consumer);
           break;
         default:
          throw new IllegalArgumentException("Unknown seekType: " + seekType);
       }
     }
+
+    private void assignAndSeekForOffset(KafkaConsumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions();
+      consumer.assign(partitions);
+      consumerPosition.getSeekTo().forEach((partition, offset) -> {
+        TopicPartition topicPartition = new TopicPartition(topic, partition);
+        consumer.seek(topicPartition, offset);
+      });
+    }
+
+    private void assignAndSeekForTimestamp(KafkaConsumer<Bytes, Bytes> consumer) {
+      Map<TopicPartition, Long> timestampsToSearch = consumerPosition.getSeekTo().entrySet().stream()
+          .collect(Collectors.toMap(
+              partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
+              Map.Entry::getValue
+          ));
+      Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
+          .entrySet().stream()
+          .filter(e -> e.getValue() != null)
+          .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+
+      if (offsetsForTimestamps.isEmpty()) {
+        throw new IllegalArgumentException("No offsets were found for requested timestamps");
+      }
+
+      consumer.assign(offsetsForTimestamps.keySet());
+      offsetsForTimestamps.forEach(consumer::seek);
+    }
+
+    private void assignAndSeekFromBeginning(KafkaConsumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions();
+      consumer.assign(partitions);
+      consumer.seekToBeginning(partitions);
+    }
   }
 }
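
For context: KafkaConsumer.offsetsForTimes() returns a map whose value is null for any partition that has no record with a timestamp at or after the requested one, so dereferencing that value unconditionally is what produced the NPE this commit fixes. Below is a minimal standalone sketch of the same filter-then-seek pattern; the TimestampSeek class and the seekToTimestamp helper are illustrative names and signatures, not part of the kafka-ui codebase.

import java.util.Map;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

class TimestampSeek {

  // Assigns only the partitions for which a record at/after the timestamp exists,
  // then seeks each of them to that offset. Partitions whose lookup result is null
  // are skipped instead of dereferenced, mirroring the fix shown in the diff above.
  static void seekToTimestamp(KafkaConsumer<Bytes, Bytes> consumer,
                              String topic,
                              Map<Integer, Long> timestampPerPartition) {
    Map<TopicPartition, Long> timestampsToSearch = timestampPerPartition.entrySet().stream()
        .collect(Collectors.toMap(
            e -> new TopicPartition(topic, e.getKey()),
            Map.Entry::getValue
        ));

    // offsetsForTimes() maps a partition to null when no matching record exists.
    Map<TopicPartition, OffsetAndTimestamp> found = consumer.offsetsForTimes(timestampsToSearch);

    Map<TopicPartition, Long> offsets = found.entrySet().stream()
        .filter(e -> e.getValue() != null)
        .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

    if (offsets.isEmpty()) {
      throw new IllegalArgumentException("No offsets were found for requested timestamps");
    }

    consumer.assign(offsets.keySet());
    offsets.forEach(consumer::seek);
  }
}

Note the design choice the commit makes: assign() is now limited to the partitions that actually resolved to an offset, rather than all requested partitions, so the consumer never polls partitions that had no data for the requested timestamp.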
