Skip to content

Commit 7b0cd0f

Browse files
committed
Fix compiler warnings
Related to #3019 * Add `@SuppressWarnings("serial")` to `KafkaTemplate.SkipAbortException` * Move logic in the `FailedBatchProcessor.seekOrRecover()` out of `finally` block. Essentially, swallow commit exception and follow with seeks **Auto-cherry-pick to `3.1.x`**
1 parent 265e55f commit 7b0cd0f

File tree

2 files changed

+51
-47
lines changed

2 files changed

+51
-47
lines changed

spring-kafka/src/main/java/org/springframework/kafka/core/KafkaTemplate.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -982,6 +982,7 @@ public void destroy() {
982982
}
983983
}
984984

985+
@SuppressWarnings("serial")
985986
private static final class SkipAbortException extends RuntimeException {
986987

987988
SkipAbortException(Throwable cause) {

spring-kafka/src/main/java/org/springframework/kafka/listener/FailedBatchProcessor.java

Lines changed: 50 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,12 @@
4444
* the listener throws a {@link BatchListenerFailedException}, the offsets prior to the
4545
* failed record are committed and the remaining records have seeks performed. When the
4646
* retries are exhausted, the failed record is sent to the recoverer instead of being
47-
* included in the seeks. If other exceptions are thrown processing is delegated to the
48-
* fallback handler.
47+
* included in the seeks. If other exceptions are thrown, the fallback handler takes the processing.
4948
*
5049
* @author Gary Russell
5150
* @author Francois Rosiere
5251
* @author Wang Zhiyang
52+
* @author Artem Bilan
5353
* @since 2.8
5454
*
5555
*/
@@ -63,10 +63,10 @@ public abstract class FailedBatchProcessor extends FailedRecordProcessor {
6363
* Construct an instance with the provided properties.
6464
* @param recoverer the recoverer.
6565
* @param backOff the back off.
66-
* @param fallbackHandler the fall back handler.
66+
* @param fallbackHandler the fallback handler.
6767
*/
6868
public FailedBatchProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff,
69-
CommonErrorHandler fallbackHandler) {
69+
CommonErrorHandler fallbackHandler) {
7070

7171
this(recoverer, backOff, null, fallbackHandler);
7272
}
@@ -76,11 +76,11 @@ public FailedBatchProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception
7676
* @param recoverer the recoverer.
7777
* @param backOff the back off.
7878
* @param backOffHandler the {@link BackOffHandler}
79-
* @param fallbackHandler the fall back handler.
79+
* @param fallbackHandler the fallback handler.
8080
* @since 2.9
8181
*/
8282
public FailedBatchProcessor(@Nullable BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer, BackOff backOff,
83-
@Nullable BackOffHandler backOffHandler, CommonErrorHandler fallbackHandler) {
83+
@Nullable BackOffHandler backOffHandler, CommonErrorHandler fallbackHandler) {
8484

8585
super(recoverer, backOff, backOffHandler);
8686
this.fallbackBatchHandler = fallbackHandler;
@@ -103,7 +103,7 @@ public void setLogLevel(Level logLevel) {
103103
}
104104

105105
/**
106-
* Set to false to not reclassify the exception if different from the previous
106+
* Set to {@code false} to not reclassify the exception if different from the previous
107107
* failure. If the changed exception is classified as retryable, the existing back off
108108
* sequence is used; a new sequence is not started. Default true. Only applies when
109109
* the fallback batch error handler (for exceptions other than
@@ -195,7 +195,7 @@ private void fallback(Exception thrownException, ConsumerRecords<?, ?> data, Con
195195
this.fallbackBatchHandler.handleBatch(thrownException, data, consumer, container, invokeListener);
196196
}
197197

198-
private int findIndex(ConsumerRecords<?, ?> data, ConsumerRecord<?, ?> record) {
198+
private int findIndex(ConsumerRecords<?, ?> data, @Nullable ConsumerRecord<?, ?> record) {
199199
if (record == null) {
200200
return -1;
201201
}
@@ -229,57 +229,60 @@ private <K, V> ConsumerRecords<K, V> seekOrRecover(Exception thrownException, @N
229229
remaining.add(datum);
230230
}
231231
}
232+
232233
try {
233-
if (offsets.size() > 0) {
234+
if (!offsets.isEmpty()) {
234235
commit(consumer, container, offsets);
235236
}
236237
}
237-
finally {
238-
if (isSeekAfterError()) {
239-
if (remaining.size() > 0) {
240-
SeekUtils.seekOrRecover(thrownException, remaining, consumer, container, false,
241-
getFailureTracker(), this.logger, getLogLevel());
242-
ConsumerRecord<?, ?> recovered = remaining.get(0);
243-
commit(consumer, container,
244-
Collections.singletonMap(new TopicPartition(recovered.topic(), recovered.partition()),
245-
ListenerUtils.createOffsetAndMetadata(container, recovered.offset() + 1)));
246-
if (remaining.size() > 1) {
247-
throw new KafkaException("Seek to current after exception", getLogLevel(), thrownException);
248-
}
238+
catch (Exception ex) {
239+
// Ignore and follow with seek below
240+
}
241+
242+
if (isSeekAfterError()) {
243+
if (!remaining.isEmpty()) {
244+
SeekUtils.seekOrRecover(thrownException, remaining, consumer, container, false,
245+
getFailureTracker(), this.logger, getLogLevel());
246+
ConsumerRecord<?, ?> recovered = remaining.get(0);
247+
commit(consumer, container,
248+
Collections.singletonMap(new TopicPartition(recovered.topic(), recovered.partition()),
249+
ListenerUtils.createOffsetAndMetadata(container, recovered.offset() + 1)));
250+
if (remaining.size() > 1) {
251+
throw new KafkaException("Seek to current after exception", getLogLevel(), thrownException);
249252
}
250-
return ConsumerRecords.empty();
251253
}
252-
else {
253-
if (remaining.size() > 0) {
254-
try {
255-
if (getFailureTracker().recovered(remaining.get(0), thrownException, container,
256-
consumer)) {
257-
remaining.remove(0);
258-
}
259-
}
260-
catch (Exception e) {
261-
if (SeekUtils.isBackoffException(thrownException)) {
262-
this.logger.debug(e, () -> KafkaUtils.format(remaining.get(0))
263-
+ " included in remaining due to retry back off " + thrownException);
264-
}
265-
else {
266-
this.logger.error(e, KafkaUtils.format(remaining.get(0))
267-
+ " included in remaining due to " + thrownException);
268-
}
254+
return ConsumerRecords.empty();
255+
}
256+
else {
257+
if (!remaining.isEmpty()) {
258+
try {
259+
if (getFailureTracker().recovered(remaining.get(0), thrownException, container,
260+
consumer)) {
261+
remaining.remove(0);
269262
}
270263
}
271-
if (remaining.isEmpty()) {
272-
return ConsumerRecords.empty();
264+
catch (Exception e) {
265+
if (SeekUtils.isBackoffException(thrownException)) {
266+
this.logger.debug(e, () -> KafkaUtils.format(remaining.get(0))
267+
+ " included in remaining due to retry back off " + thrownException);
268+
}
269+
else {
270+
this.logger.error(e, KafkaUtils.format(remaining.get(0))
271+
+ " included in remaining due to " + thrownException);
272+
}
273273
}
274-
Map<TopicPartition, List<ConsumerRecord<K, V>>> remains = new HashMap<>();
275-
remaining.forEach(rec -> remains.computeIfAbsent(new TopicPartition(rec.topic(), rec.partition()),
276-
tp -> new ArrayList<>()).add((ConsumerRecord<K, V>) rec));
277-
return new ConsumerRecords<>(remains);
278274
}
275+
if (remaining.isEmpty()) {
276+
return ConsumerRecords.empty();
277+
}
278+
Map<TopicPartition, List<ConsumerRecord<K, V>>> remains = new HashMap<>();
279+
remaining.forEach(rec -> remains.computeIfAbsent(new TopicPartition(rec.topic(), rec.partition()),
280+
tp -> new ArrayList<>()).add((ConsumerRecord<K, V>) rec));
281+
return new ConsumerRecords<>(remains);
279282
}
280283
}
281284

282-
private void commit(Consumer<?, ?> consumer, MessageListenerContainer container,
285+
private static void commit(Consumer<?, ?> consumer, MessageListenerContainer container,
283286
Map<TopicPartition, OffsetAndMetadata> offsets) {
284287

285288
ContainerProperties properties = container.getContainerProperties();
@@ -296,7 +299,7 @@ private void commit(Consumer<?, ?> consumer, MessageListenerContainer container,
296299
}
297300

298301
@Nullable
299-
private BatchListenerFailedException getBatchListenerFailedException(Throwable throwableArg) {
302+
private static BatchListenerFailedException getBatchListenerFailedException(@Nullable Throwable throwableArg) {
300303
if (throwableArg == null || throwableArg instanceof BatchListenerFailedException) {
301304
return (BatchListenerFailedException) throwableArg;
302305
}

0 commit comments

Comments (0)