
Commit a1c2092

garyrussell authored and artembilan committed
GH-2269: Improve DLPR Extensibility
Resolves #2269

- add getters for fields used in protected methods
- change more methods to protected

**Cherry-pick to 2.9.x, 2.8.x, 2.7.x**

If it doesn't cherry-pick cleanly, I will back-port.

# Conflicts:
#	spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java
1 parent a08267d commit a1c2092

File tree

1 file changed: +114 additions, -53 deletions


spring-kafka/src/main/java/org/springframework/kafka/listener/DeadLetterPublishingRecoverer.java

Lines changed: 114 additions & 53 deletions
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018-2021 the original author or authors.
+ * Copyright 2018-2022 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -69,7 +69,7 @@ public class DeadLetterPublishingRecoverer implements ConsumerAwareRecordRecover
     protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(getClass())); // NOSONAR

     private static final BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition>
-        DEFAULT_DESTINATION_RESOLVER = (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition());
+            DEFAULT_DESTINATION_RESOLVER = (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition());

     private static final long FIVE = 5L;

@@ -157,7 +157,6 @@ public DeadLetterPublishingRecoverer(Map<Class<?>, KafkaOperations<? extends Obj
      * @param templates the {@link KafkaOperations}s to use for publishing.
      * @param destinationResolver the resolving function.
      */
-    @SuppressWarnings("unchecked")
     public DeadLetterPublishingRecoverer(Map<Class<?>, KafkaOperations<? extends Object, ? extends Object>> templates,
             BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {

@@ -170,28 +169,28 @@ public DeadLetterPublishingRecoverer(Map<Class<?>, KafkaOperations<? extends Obj
         this.transactional = firstTemplate.isTransactional();
         Boolean tx = this.transactional;
         Assert.isTrue(templates.values()
-            .stream()
-            .map(t -> t.isTransactional())
-            .allMatch(t -> t.equals(tx)), "All templates must have the same setting for transactional");
+                .stream()
+                .map(KafkaOperations::isTransactional)
+                .allMatch(t -> t.equals(tx)), "All templates must have the same setting for transactional");
         this.destinationResolver = destinationResolver;
     }

     /**
-     * Create an instance with a template resolving function that receives the failed
-     * consumer record and the exception and returns a {@link KafkaOperations} and a
-     * flag on whether or not the publishing from this instance will be transactional
-     * or not. Also receives a destination resolving function that works similarly but
-     * returns a {@link TopicPartition} instead. If the partition in the {@link TopicPartition}
-     * is less than 0, no partition is set when publishing to the topic.
-     *
-     * @param templateResolver the function that resolver the {@link KafkaOperations} to use for publishing.
-     * @param transactional whether or not publishing by this instance should be transactional
-     * @param destinationResolver the resolving function.
-     * @since 2.7
-     */
+     * Create an instance with a template resolving function that receives the failed
+     * consumer record and the exception and returns a {@link KafkaOperations} and a
+     * flag on whether the publishing from this instance will be transactional
+     * or not. Also receives a destination resolving function that works similarly but
+     * returns a {@link TopicPartition} instead. If the partition in the {@link TopicPartition}
+     * is less than 0, no partition is set when publishing to the topic.
+     *
+     * @param templateResolver the function that resolver the {@link KafkaOperations} to use for publishing.
+     * @param transactional whether publishing by this instance should be transactional
+     * @param destinationResolver the resolving function.
+     * @since 2.7
+     */
     public DeadLetterPublishingRecoverer(Function<ProducerRecord<?, ?>, KafkaOperations<?, ?>> templateResolver,
-        boolean transactional,
-        BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {
+            boolean transactional,
+            BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition> destinationResolver) {

         Assert.notNull(templateResolver, "The templateResolver cannot be null");
         Assert.notNull(destinationResolver, "The destinationResolver cannot be null");
@@ -293,7 +292,18 @@ public void setFailIfSendResultIsError(boolean failIfSendResultIsError) {
     }

     /**
-     * Set the minumum time to wait for message sending. Default is the producer
+     * If true, wait for the send result and throw an exception if it fails.
+     * It will wait for the milliseconds specified in waitForSendResultTimeout for the result.
+     * @return true to wait.
+     * @since 2.7.14
+     * @see #setWaitForSendResultTimeout(Duration)
+     */
+    protected boolean isFailIfSendResultIsError() {
+        return this.failIfSendResultIsError;
+    }
+
+    /**
+     * Set the minimum time to wait for message sending. Default is the producer
      * configuration {@code delivery.timeout.ms} plus the {@link #setTimeoutBuffer(long)}.
      * @param waitForSendResultTimeout the timeout.
      * @since 2.7
@@ -305,8 +315,9 @@ public void setWaitForSendResultTimeout(Duration waitForSendResultTimeout) {
     }

     /**
-     * Set the number of milliseconds to add to the producer configuration {@code delivery.timeout.ms}
-     * property to avoid timing out before the Kafka producer. Default 5000.
+     * Set the number of milliseconds to add to the producer configuration
+     * {@code delivery.timeout.ms} property to avoid timing out before the Kafka producer.
+     * Default 5000.
      * @param buffer the buffer.
      * @since 2.7
      * @see #setWaitForSendResultTimeout(Duration)
@@ -316,17 +327,36 @@ public void setTimeoutBuffer(long buffer) {
     }

     /**
-     * Set to true to remove previous exception headers and only retain headers for the
-     * current exception. Default is false, which means all exception header values are
-     * retained; this can cause a growth in record size when a record is republished many
-     * times.
-     * @param stripPreviousExceptionHeaders true to strip.
+     * The number of milliseconds to add to the producer configuration
+     * {@code delivery.timeout.ms} property to avoid timing out before the Kafka producer.
+     * @return the buffer.
+     * @since 2.7.14
+     */
+    protected long getTimeoutBuffer() {
+        return this.timeoutBuffer;
+    }
+
+    /**
+     * Set to false to retain previous exception headers as well as headers for the
+     * current exception. Default is true, which means only the current headers are
+     * retained; setting it to false this can cause a growth in record size when a record
+     * is republished many times.
+     * @param stripPreviousExceptionHeaders false to retain all.
      * @since 2.7.9
      */
     public void setStripPreviousExceptionHeaders(boolean stripPreviousExceptionHeaders) {
         this.stripPreviousExceptionHeaders = stripPreviousExceptionHeaders;
     }

+    /**
+     * True if publishing should run in a transaction.
+     * @return true for transactional.
+     * @since 2.7.14
+     */
+    protected boolean isTransactional() {
+        return this.transactional;
+    }
+
     @SuppressWarnings("unchecked")
     @Override
     public void accept(ConsumerRecord<?, ?> record, @Nullable Consumer<?, ?> consumer, Exception exception) {
@@ -341,7 +371,7 @@ public void accept(ConsumerRecord<?, ?> record, @Nullable Consumer<?, ?> consume
         DeserializationException vDeserEx = ListenerUtils.getExceptionFromHeader(record,
                 ErrorHandlingDeserializer.VALUE_DESERIALIZER_EXCEPTION_HEADER, this.logger);
         DeserializationException kDeserEx = ListenerUtils.getExceptionFromHeader(record,
-            ErrorHandlingDeserializer.KEY_DESERIALIZER_EXCEPTION_HEADER, this.logger);
+                ErrorHandlingDeserializer.KEY_DESERIALIZER_EXCEPTION_HEADER, this.logger);
         Headers headers = new RecordHeaders(record.headers().toArray());
         addAndEnhanceHeaders(record, exception, vDeserEx, kDeserEx, headers);
         ProducerRecord<Object, Object> outRecord = createProducerRecord(record, tp, headers,
@@ -385,7 +415,7 @@ private void sendOrThrow(ProducerRecord<Object, Object> outRecord,

     private void maybeThrow(ConsumerRecord<?, ?> record, Exception exception) {
         String message = String.format("No destination returned for record %s and exception %s. " +
-            "failIfNoDestinationReturned: %s", ListenerUtils.recordToString(record), exception,
+                "failIfNoDestinationReturned: %s", ListenerUtils.recordToString(record), exception,
                 this.throwIfNoDestinationReturned);
         this.logger.warn(message);
         if (this.throwIfNoDestinationReturned) {
@@ -437,7 +467,7 @@ private TopicPartition checkPartition(TopicPartition tp, Consumer<?, ?> consumer

     @SuppressWarnings("unchecked")
     private KafkaOperations<Object, Object> findTemplateForValue(@Nullable Object value,
-        Map<Class<?>, KafkaOperations<?, ?>> templates) {
+            Map<Class<?>, KafkaOperations<?, ?>> templates) {
         if (value == null) {
             KafkaOperations<?, ?> operations = templates.get(Void.class);
             if (operations == null) {
@@ -448,16 +478,16 @@ private KafkaOperations<Object, Object> findTemplateForValue(@Nullable Object va
             }
         }
         Optional<Class<?>> key = templates.keySet()
-            .stream()
-            .filter((k) -> k.isAssignableFrom(value.getClass()))
-            .findFirst();
+                .stream()
+                .filter((k) -> k.isAssignableFrom(value.getClass()))
+                .findFirst();
         if (key.isPresent()) {
             return (KafkaOperations<Object, Object>) templates.get(key.get());
         }
         this.logger.warn(() -> "Failed to find a template for " + value.getClass() + " attempting to use the last entry");
         return (KafkaOperations<Object, Object>) templates.values()
                 .stream()
-            .reduce((first, second) -> second)
+                .reduce((first, second) -> second)
                 .get();
     }

@@ -509,7 +539,13 @@ protected void publish(ProducerRecord<Object, Object> outRecord, KafkaOperations
         }
     }

-    private void verifySendResult(KafkaOperations<Object, Object> kafkaTemplate,
+    /**
+     * Wait for the send future to complete.
+     * @param kafkaTemplate the template used to send the record.
+     * @param outRecord the record.
+     * @param sendResult the future.
+     */
+    protected void verifySendResult(KafkaOperations<Object, Object> kafkaTemplate,
             ProducerRecord<Object, Object> outRecord,
             @Nullable ListenableFuture<SendResult<Object, Object>> sendResult) {

@@ -529,7 +565,14 @@ private void verifySendResult(KafkaOperations<Object, Object> kafkaTemplate,
         }
     }

-    private Duration determineSendTimeout(KafkaOperations<?, ?> template) {
+    /**
+     * Determine the send timeout based on the template's producer factory and
+     * {@link #setWaitForSendResultTimeout(Duration)}.
+     * @param template the template.
+     * @return the timeout.
+     * @since 2.7.14
+     */
+    protected Duration determineSendTimeout(KafkaOperations<?, ?> template) {
         ProducerFactory<? extends Object, ? extends Object> producerFactory = template.getProducerFactory();
         if (producerFactory != null) { // NOSONAR - will only occur in mock tests
             Map<String, Object> props = producerFactory.getConfigurationProperties();
@@ -608,18 +651,18 @@ private String getStackTraceAsString(Throwable cause) {
     protected HeaderNames getHeaderNames() {
         return HeaderNames.Builder
                 .original()
-            .offsetHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET)
-            .timestampHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP)
-            .timestampTypeHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE)
-            .topicHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC)
-            .partitionHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION)
+                .offsetHeader(KafkaHeaders.DLT_ORIGINAL_OFFSET)
+                .timestampHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP)
+                .timestampTypeHeader(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE)
+                .topicHeader(KafkaHeaders.DLT_ORIGINAL_TOPIC)
+                .partitionHeader(KafkaHeaders.DLT_ORIGINAL_PARTITION)
                 .exception()
-            .keyExceptionFqcn(KafkaHeaders.DLT_KEY_EXCEPTION_FQCN)
-            .exceptionFqcn(KafkaHeaders.DLT_EXCEPTION_FQCN)
-            .keyExceptionMessage(KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE)
-            .exceptionMessage(KafkaHeaders.DLT_EXCEPTION_MESSAGE)
-            .keyExceptionStacktrace(KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE)
-            .exceptionStacktrace(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)
+                .keyExceptionFqcn(KafkaHeaders.DLT_KEY_EXCEPTION_FQCN)
+                .exceptionFqcn(KafkaHeaders.DLT_EXCEPTION_FQCN)
+                .keyExceptionMessage(KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE)
+                .exceptionMessage(KafkaHeaders.DLT_EXCEPTION_MESSAGE)
+                .keyExceptionStacktrace(KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE)
+                .exceptionStacktrace(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)
                 .build();
     }

@@ -631,6 +674,7 @@ protected HeaderNames getHeaderNames() {
     public static class HeaderNames {

         private final HeaderNames.Original original;
+
         private final ExceptionInfo exceptionInfo;

         HeaderNames(HeaderNames.Original original, ExceptionInfo exceptionInfo) {
@@ -639,10 +683,15 @@ public static class HeaderNames {
         }

         static class Original {
+
             private final String offsetHeader;
+
             private final String timestampHeader;
+
             private final String timestampTypeHeader;
+
             private final String topicHeader;
+
             private final String partitionHeader;

             Original(String offsetHeader,
@@ -656,30 +705,37 @@ static class Original {
                 this.topicHeader = topicHeader;
                 this.partitionHeader = partitionHeader;
             }
+
         }

         static class ExceptionInfo {

             private final String keyExceptionFqcn;
+
             private final String exceptionFqcn;
+
             private final String keyExceptionMessage;
+
             private final String exceptionMessage;
+
             private final String keyExceptionStacktrace;
+
             private final String exceptionStacktrace;

             ExceptionInfo(String keyExceptionFqcn,
-                String exceptionFqcn,
-                String keyExceptionMessage,
-                String exceptionMessage,
-                String keyExceptionStacktrace,
-                String exceptionStacktrace) {
+                    String exceptionFqcn,
+                    String keyExceptionMessage,
+                    String exceptionMessage,
+                    String keyExceptionStacktrace,
+                    String exceptionStacktrace) {
                 this.keyExceptionFqcn = keyExceptionFqcn;
                 this.exceptionFqcn = exceptionFqcn;
                 this.keyExceptionMessage = keyExceptionMessage;
                 this.exceptionMessage = exceptionMessage;
                 this.keyExceptionStacktrace = keyExceptionStacktrace;
                 this.exceptionStacktrace = exceptionStacktrace;
             }
+
         }


@@ -805,6 +861,7 @@ private DeadLetterPublishingRecoverer.HeaderNames.Original build() {
                     this.topicHeader,
                     this.partitionHeader);
             }
+
         }

         /**
@@ -919,7 +976,11 @@ public DeadLetterPublishingRecoverer.HeaderNames build() {
                         this.keyExceptionStacktrace,
                         this.exceptionStacktrace));
                }
+
            }
+
        }
+
    }
+
 }
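
For downstream code, the practical effect of this commit is that send verification and timeout handling can be customized by subclassing instead of by copying the class. A minimal sketch of such a subclass follows; the class name and the doubling policy are illustrative assumptions, not part of the commit, and it assumes a release containing these changes (e.g. 2.7.14 or later):

    import java.time.Duration;

    import org.springframework.kafka.core.KafkaOperations;
    import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;

    // Hypothetical subclass; only determineSendTimeout(), getTimeoutBuffer(),
    // and isTransactional() are members this commit makes available.
    public class PatientDlpr extends DeadLetterPublishingRecoverer {

        public PatientDlpr(KafkaOperations<Object, Object> template) {
            super(template);
        }

        @Override
        protected Duration determineSendTimeout(KafkaOperations<?, ?> template) {
            // Start from the framework's computed timeout (delivery.timeout.ms
            // plus the buffer, now visible to subclasses via getTimeoutBuffer()).
            Duration base = super.determineSendTimeout(template);
            // Allow extra time when publishing transactionally; isTransactional()
            // is one of the getters this commit adds.
            return isTransactional() ? base.multipliedBy(2) : base;
        }

    }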
