Skip to content

Commit 9794c58

Browse files
Authored by: Wzy19930507, Zhiyang.Wang1, garyrussell
GH-2862: Add Option to Log Recovery to DLPR (#2869)
* GH-2862: Add Option to Log Recovery to DLPR

  Resolves #2862

* Add option `logRecoveryRecord` in `annotation-error-handling.adoc`;
  fix Javadoc for `CommonErrorHandler`; delete unused code in `SerializationUtils`.

* Fix `@since` version.

---------

Co-authored-by: Zhiyang.Wang1 <[email protected]>
Co-authored-by: Gary Russell <[email protected]>
1 parent 6166da3 commit 9794c58

File tree

5 files changed

+19
-3
lines changed

5 files changed

+19
-3
lines changed

spring-kafka-docs/src/main/antora/modules/ROOT/pages/kafka/annotation-error-handling.adoc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -729,6 +729,8 @@ Starting with version 2.7, the recoverer checks that the partition selected by t
729729
If the partition is not present, the partition in the `ProducerRecord` is set to `null`, allowing the `KafkaProducer` to select the partition.
730730
You can disable this check by setting the `verifyPartition` property to `false`.
731731

732+
Starting with version 3.1, setting the `logRecoveryRecord` property to `true` will log the recovery record and exception.
733+
732734
[[dlpr-headers]]
733735
== Managing Dead Letter Record Headers
734736

spring-kafka/src/main/java/org/springframework/kafka/listener/CommonErrorHandler.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,15 +91,15 @@ default boolean handleOne(Exception thrownException, ConsumerRecord<?, ?> record
9191
}
9292

9393
/**
94-
* Handle the exception for a record listener when {@link #remainingRecords()} returns
94+
* Handle the exception for a record listener when {@link #seeksAfterHandling()} returns
9595
* true. The failed record and all the remaining records from the poll are passed in.
9696
* Usually used when the error handler performs seeks so that the remaining records
9797
* will be redelivered on the next poll.
9898
* @param thrownException the exception.
9999
* @param records the remaining records including the one that failed.
100100
* @param consumer the consumer.
101101
* @param container the container.
102-
* @see #remainingRecords()
102+
* @see #seeksAfterHandling()
103103
*/
104104
default void handleRemaining(Exception thrownException, List<ConsumerRecord<?, ?>> records, Consumer<?, ?> consumer,
105105
MessageListenerContainer container) {

spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,8 @@ public class DeadLetterPublishingRecoverer extends ExceptionClassifier implement
113113

114114
private boolean skipSameTopicFatalExceptions = true;
115115

116+
private boolean logRecoveryRecord = false;
117+
116118
private ExceptionHeadersCreator exceptionHeadersCreator = this::addExceptionInfoHeaders;
117119

118120
private Supplier<HeaderNames> headerNamesSupplier = () -> HeaderNames.Builder
@@ -400,6 +402,15 @@ public void setSkipSameTopicFatalExceptions(boolean skipSameTopicFatalExceptions
400402
this.skipSameTopicFatalExceptions = skipSameTopicFatalExceptions;
401403
}
402404

405+
/**
406+
* Set to true if you want to log recovery record and exception.
407+
* @param logRecoveryRecord true to log record and exception.
408+
* @since 3.1
409+
*/
410+
public void setLogRecoveryRecord(boolean logRecoveryRecord) {
411+
this.logRecoveryRecord = logRecoveryRecord;
412+
}
413+
403414
/**
404415
* Set a {@link ExceptionHeadersCreator} implementation to completely take over
405416
* setting the exception headers in the output record. Disables all headers that are
@@ -503,6 +514,9 @@ public void accept(ConsumerRecord<?, ?> record, @Nullable Consumer<?, ?> consume
503514
+ " and the destination resolver routed back to the same topic");
504515
return;
505516
}
517+
if (this.logRecoveryRecord) {
518+
this.logger.info(exception, () -> "Recovery record " + KafkaUtils.format(record));
519+
}
506520
if (consumer != null && this.verifyPartition) {
507521
tp = checkPartition(tp, consumer);
508522
}

spring-kafka/src/main/java/org/springframework/kafka/retrytopic/DeadLetterPublishingRecovererFactory.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -230,6 +230,7 @@ public DeadLetterPublishingRecoverer create(String mainListenerId) {
230230
recoverer.setAppendOriginalHeaders(false);
231231
recoverer.setThrowIfNoDestinationReturned(false);
232232
recoverer.setSkipSameTopicFatalExceptions(false);
233+
recoverer.setLogRecoveryRecord(false);
233234
this.recovererCustomizer.accept(recoverer);
234235
this.fatalExceptions.forEach(recoverer::addNotRetryableExceptions);
235236
this.nonFatalExceptions.forEach(recoverer::removeClassification);

spring-kafka/src/main/java/org/springframework/kafka/support/serializer/SerializationUtils.java

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,6 @@ public static DeserializationException getExceptionFromHeader(final ConsumerReco
201201
return null;
202202
}
203203
if (header != null) {
204-
byte[] value = header.value();
205204
DeserializationException exception = byteArrayToDeserializationException(logger, header);
206205
if (exception != null) {
207206
Headers headers = new RecordHeaders(record.headers().toArray());

0 commit comments

Comments
 (0)