import java.nio.ByteBuffer;
import java.time.Duration;
-import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -305,8 +305,7 @@ public boolean isContainerPaused() {

	@Override
	public boolean isPartitionPaused(TopicPartition topicPartition) {
-		return this.listenerConsumer != null && this.listenerConsumer
-				.isPartitionPaused(topicPartition);
+		return this.listenerConsumer != null && this.listenerConsumer.isPartitionPaused(topicPartition);
	}

	@Override
@@ -317,33 +316,28 @@ public boolean isInExpectedState() {
	@Override
	public void enforceRebalance() {
		this.thisOrParentContainer.enforceRebalanceRequested.set(true);
-		KafkaMessageListenerContainer<K, V>.ListenerConsumer consumer = this.listenerConsumer;
-		if (consumer != null) {
-			consumer.wakeIfNecessary();
-		}
+		consumerWakeIfNecessary();
	}

	@Override
	public void pause() {
		super.pause();
-		KafkaMessageListenerContainer<K, V>.ListenerConsumer consumer = this.listenerConsumer;
-		if (consumer != null) {
-			consumer.wakeIfNecessary();
-		}
+		consumerWakeIfNecessary();
	}

	@Override
	public void resume() {
		super.resume();
-		KafkaMessageListenerContainer<K, V>.ListenerConsumer consumer = this.listenerConsumer;
-		if (consumer != null) {
-			consumer.wakeIfNecessary();
-		}
+		consumerWakeIfNecessary();
	}

	@Override
	public void resumePartition(TopicPartition topicPartition) {
		super.resumePartition(topicPartition);
+		consumerWakeIfNecessary();
+	}
+
+	private void consumerWakeIfNecessary() {
		KafkaMessageListenerContainer<K, V>.ListenerConsumer consumer = this.listenerConsumer;
		if (consumer != null) {
			consumer.wakeIfNecessary();
@@ -422,15 +416,11 @@ private void checkAckMode(ContainerProperties containerProperties) {
	}

	private ListenerType determineListenerType(GenericMessageListener<?> listener) {
-		ListenerType listenerType = ListenerUtils.determineListenerType(listener);
-		if (listener instanceof DelegatingMessageListener) {
-			Object delegating = listener;
-			while (delegating instanceof DelegatingMessageListener<?> dml) {
-				delegating = dml.getDelegate();
-			}
-			listenerType = ListenerUtils.determineListenerType(delegating);
+		Object delegating = listener;
+		while (delegating instanceof DelegatingMessageListener<?> dml) {
+			delegating = dml.getDelegate();
		}
-		return listenerType;
+		return ListenerUtils.determineListenerType(delegating);
	}

	@Override
@@ -1586,7 +1576,7 @@ private void fixTxOffsetsIfNeeded() {
			this.lastCommits.forEach((tp, oamd) -> {
				long position = this.consumer.position(tp);
				Long saved = this.savedPositions.get(tp);
-				if (saved != null && saved.longValue() != position) {
+				if (saved != null && saved != position) {
					this.logger.debug(() -> "Skipping TX offset correction - seek(s) have been performed; "
							+ "saved: " + this.savedPositions + ", "
							+ "committed: " + oamd + ", "
@@ -1609,9 +1599,7 @@ private void fixTxOffsetsIfNeeded() {
				}
				else {
					this.transactionTemplate.executeWithoutResult(status -> {
-						doSendOffsets(((KafkaResourceHolder) TransactionSynchronizationManager
-								.getResource(this.kafkaTxManager.getProducerFactory()))
-								.getProducer(), toFix);
+						doSendOffsets(getTxProducer(), toFix);
					});
				}
			}
@@ -2088,7 +2076,7 @@ private synchronized void ackInOrder(ConsumerRecord<K, V> cRecord) {
			offs.remove(0);
			ConsumerRecord<K, V> recordToAck = cRecord;
			if (!deferred.isEmpty()) {
-				Collections.sort(deferred, (a, b) -> Long.compare(a.offset(), b.offset()));
+				deferred.sort(Comparator.comparingLong(ConsumerRecord::offset));
				while (!ObjectUtils.isEmpty(deferred) && deferred.get(0).offset() == recordToAck.offset() + 1) {
					recordToAck = deferred.remove(0);
					offs.remove(0);
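
For reference, a minimal standalone sketch of the Comparator.comparingLong idiom used above; the Event record is hypothetical and stands in for ConsumerRecord, which needs a Kafka client to construct:

	import java.util.ArrayList;
	import java.util.Comparator;
	import java.util.List;

	public class ComparingLongSketch {

		// Stand-in for ConsumerRecord: any type exposing a long-valued key.
		record Event(long offset, String payload) {
		}

		public static void main(String[] args) {
			List<Event> deferred = new ArrayList<>(List.of(
					new Event(7, "c"), new Event(5, "a"), new Event(6, "b")));

			// Equivalent to (a, b) -> Long.compare(a.offset(), b.offset()),
			// but without boxing and with the intent stated in the method name.
			deferred.sort(Comparator.comparingLong(Event::offset));

			deferred.forEach(e -> System.out.println(e.offset() + " -> " + e.payload()));
			// prints 5 -> a, 6 -> b, 7 -> c
		}
	}
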
@@ -2195,9 +2183,7 @@ private void invokeBatchListenerInTx(final ConsumerRecords<K, V> records,
				@Override
				public void doInTransactionWithoutResult(TransactionStatus s) {
					if (ListenerConsumer.this.kafkaTxManager != null) {
-						ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager
-								.getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory()))
-								.getProducer(); // NOSONAR nullable
+						ListenerConsumer.this.producer = getTxProducer();
					}
					RuntimeException aborted = doInvokeBatchListener(records, recordList);
					if (aborted != null) {
@@ -2254,10 +2240,9 @@ protected void doInTransactionWithoutResult(TransactionStatus status) {
		}

		private List<ConsumerRecord<K, V>> createRecordList(final ConsumerRecords<K, V> records) {
-			Iterator<ConsumerRecord<K, V>> iterator = records.iterator();
			List<ConsumerRecord<K, V>> list = new LinkedList<>();
-			while (iterator.hasNext()) {
-				list.add(iterator.next());
+			for (ConsumerRecord<K, V> record : records) {
+				list.add(record);
			}
			return list;
		}
@@ -2324,9 +2309,7 @@ private void commitOffsetsIfNeededAfterHandlingError(final ConsumerRecords<K, V>
					|| this.producer != null) {
				if (this.remainingRecords != null) {
					ConsumerRecord<K, V> firstUncommitted = this.remainingRecords.iterator().next();
-					Iterator<ConsumerRecord<K, V>> it = records.iterator();
-					while (it.hasNext()) {
-						ConsumerRecord<K, V> next = it.next();
+					for (ConsumerRecord<K, V> next : records) {
						if (!next.equals(firstUncommitted)) {
							this.acks.add(next);
						}
@@ -2433,7 +2416,7 @@ private void invokeBatchOnMessageWithRecordsOrList(final ConsumerRecords<K, V> r
			ConsumerRecords<K, V> records = recordsArg;
			List<ConsumerRecord<K, V>> recordList = recordListArg;
			if (this.listenerinfo != null) {
-				records.iterator().forEachRemaining(this::listenerInfo);
+				records.forEach(this::listenerInfo);
			}
			if (this.batchInterceptor != null) {
				records = this.batchInterceptor.intercept(recordsArg, this.consumer);
@@ -2516,7 +2499,6 @@ private void invokeRecordListener(final ConsumerRecords<K, V> records) {
		 * Invoke the listener with each record in a separate transaction.
		 * @param records the records.
		 */
-		@SuppressWarnings(RAWTYPES) // NOSONAR complexity
		private void invokeRecordListenerInTx(final ConsumerRecords<K, V> records) {
			Iterator<ConsumerRecord<K, V>> iterator = records.iterator();
			while (iterator.hasNext()) {
@@ -2561,9 +2543,7 @@ private void invokeInTransaction(Iterator<ConsumerRecord<K, V>> iterator, final
				@Override
				public void doInTransactionWithoutResult(TransactionStatus s) {
					if (ListenerConsumer.this.kafkaTxManager != null) {
-						ListenerConsumer.this.producer = ((KafkaResourceHolder) TransactionSynchronizationManager
-								.getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory()))
-								.getProducer(); // NOSONAR
+						ListenerConsumer.this.producer = getTxProducer();
					}
					RuntimeException aborted = doInvokeRecordListener(cRecord, iterator);
					if (aborted != null) {
@@ -2579,9 +2559,7 @@ private void recordAfterRollback(Iterator<ConsumerRecord<K, V>> iterator, final

			List<ConsumerRecord<K, V>> unprocessed = new ArrayList<>();
			unprocessed.add(cRecord);
-			while (iterator.hasNext()) {
-				unprocessed.add(iterator.next());
-			}
+			iterator.forEachRemaining(unprocessed::add);
			@SuppressWarnings(UNCHECKED)
			AfterRollbackProcessor<K, V> afterRollbackProcessorToUse =
					(AfterRollbackProcessor<K, V>) getAfterRollbackProcessor();
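
For reference, a minimal standalone sketch of Iterator.forEachRemaining replacing a manual hasNext()/next() drain, as in the change above; the strings are stand-ins for consumer records:

	import java.util.ArrayList;
	import java.util.Iterator;
	import java.util.List;

	public class ForEachRemainingSketch {

		public static void main(String[] args) {
			Iterator<String> iterator = List.of("r1", "r2", "r3").iterator();
			iterator.next(); // the "current" record has already been consumed

			// Collect everything left on the iterator, same as the manual while loop.
			List<String> unprocessed = new ArrayList<>();
			unprocessed.add("current");
			iterator.forEachRemaining(unprocessed::add);

			System.out.println(unprocessed); // [current, r2, r3]
		}
	}
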
@@ -2639,11 +2617,10 @@ private void doInvokeWithRecords(final ConsumerRecords<K, V> records) {
		private boolean checkImmediatePause(Iterator<ConsumerRecord<K, V>> iterator) {
			if (isPauseRequested() && this.pauseImmediate) {
				Map<TopicPartition, List<ConsumerRecord<K, V>>> remaining = new LinkedHashMap<>();
-				while (iterator.hasNext()) {
-					ConsumerRecord<K, V> next = iterator.next();
+				iterator.forEachRemaining(next -> {
					remaining.computeIfAbsent(new TopicPartition(next.topic(), next.partition()),
-							tp -> new ArrayList<ConsumerRecord<K, V>>()).add(next);
-				}
+							tp -> new ArrayList<>()).add(next);
+				});
				if (!remaining.isEmpty()) {
					this.remainingRecords = new ConsumerRecords<>(remaining);
					return true;
@@ -2712,9 +2689,7 @@ private void handleNack(final ConsumerRecords<K, V> records, final ConsumerRecor
				processCommits();
			}
			List<ConsumerRecord<?, ?>> list = new ArrayList<>();
-			Iterator<ConsumerRecord<K, V>> iterator2 = records.iterator();
-			while (iterator2.hasNext()) {
-				ConsumerRecord<K, V> next = iterator2.next();
+			for (ConsumerRecord<K, V> next : records) {
				if (!list.isEmpty() || recordsEqual(cRecord, next)) {
					list.add(next);
				}
@@ -2755,6 +2730,13 @@ private void pauseForNackSleep() {
			this.nackSleepDurationMillis = -1;
		}

+		@SuppressWarnings(RAWTYPES)
+		private Producer<?, ?> getTxProducer() {
+			return ((KafkaResourceHolder) TransactionSynchronizationManager
+					.getResource(ListenerConsumer.this.kafkaTxManager.getProducerFactory()))
+					.getProducer(); // NOSONAR
+		}
+
		/**
		 * Actually invoke the listener.
		 * @param cRecord the record.
@@ -2911,9 +2893,7 @@ private void invokeErrorHandler(final ConsumerRecord<K, V> cRecord,
				}
				List<ConsumerRecord<?, ?>> records = new ArrayList<>();
				records.add(cRecord);
-				while (iterator.hasNext()) {
-					records.add(iterator.next());
-				}
+				iterator.forEachRemaining(records::add);
				this.commonErrorHandler.handleRemaining(rte, records, this.consumer,
						KafkaMessageListenerContainer.this.thisOrParentContainer);
			}
@@ -2929,12 +2909,9 @@ private void invokeErrorHandler(final ConsumerRecord<K, V> cRecord,
				Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new LinkedHashMap<>();
				if (!handled) {
					records.computeIfAbsent(new TopicPartition(cRecord.topic(), cRecord.partition()),
-							tp -> new ArrayList<ConsumerRecord<K, V>>()).add(cRecord);
-					while (iterator.hasNext()) {
-						ConsumerRecord<K, V> next = iterator.next();
-						records.computeIfAbsent(new TopicPartition(next.topic(), next.partition()),
-								tp -> new ArrayList<ConsumerRecord<K, V>>()).add(next);
-					}
+							tp -> new ArrayList<>()).add(cRecord);
+					iterator.forEachRemaining(next -> records.computeIfAbsent(
+							new TopicPartition(next.topic(), next.partition()), tp -> new ArrayList<>()).add(next));
				}
				if (!records.isEmpty()) {
					this.remainingRecords = new ConsumerRecords<>(records);
@@ -3201,9 +3178,7 @@ private void initPartitionsIfNeeded() {
			doInitialSeeks(partitions, beginnings, ends);
			if (this.consumerSeekAwareListener != null) {
				this.consumerSeekAwareListener.onPartitionsAssigned(this.definedPartitions.keySet().stream()
-						.map(tp -> new SimpleEntry<>(tp, this.consumer.position(tp)))
-						.collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue())),
-						this.seekCallback);
+						.collect(Collectors.toMap(tp -> tp, this.consumer::position)), this.seekCallback);
			}
		}
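
For reference, a minimal standalone sketch of building the assignment map with Collectors.toMap, as in the change above; the String partitions and the positions map are stand-ins for TopicPartition and consumer.position(tp), which need a live consumer:

	import java.util.List;
	import java.util.Map;
	import java.util.stream.Collectors;

	public class ToMapSketch {

		public static void main(String[] args) {
			List<String> partitions = List.of("topic-0", "topic-1");
			Map<String, Long> positions = Map.of("topic-0", 42L, "topic-1", 7L);

			// Key mapper keeps the element itself; value mapper looks up its position,
			// mirroring toMap(tp -> tp, this.consumer::position) without an intermediate SimpleEntry.
			Map<String, Long> assigned = partitions.stream()
					.collect(Collectors.toMap(tp -> tp, positions::get));

			System.out.println(assigned); // e.g. {topic-0=42, topic-1=7}
		}
	}
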
@@ -3884,20 +3859,13 @@ private Long computeBackwardWhereTo(long offset, boolean toCurrent, TopicPartiti
		}


-	private static final class OffsetMetadata {
-
-		final Long offset; // NOSONAR
-
-		final boolean relativeToCurrent; // NOSONAR
-
-		final SeekPosition seekPosition; // NOSONAR
-
-		OffsetMetadata(Long offset, boolean relativeToCurrent, SeekPosition seekPosition) {
-			this.offset = offset;
-			this.relativeToCurrent = relativeToCurrent;
-			this.seekPosition = seekPosition;
-		}
-
+	/**
+	 * Offset metadata record.
+	 * @param offset current offset.
+	 * @param relativeToCurrent relative to current.
+	 * @param seekPosition seek position strategy.
+	 */
+	private record OffsetMetadata(Long offset, boolean relativeToCurrent, SeekPosition seekPosition) {
	}

	private class StopCallback implements BiConsumer<Object, Throwable> {
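
For reference, a minimal standalone sketch of the class-to-record conversion shown above; SeekSpot is a hypothetical stand-in for SeekPosition:

	public class RecordSketch {

		enum SeekSpot { BEGINNING, END, TIMESTAMP }

		// Equivalent of the removed final-field class: the canonical constructor,
		// offset(), relativeToCurrent(), seekPosition(), equals, hashCode and toString
		// are all generated by the compiler.
		record OffsetMetadata(Long offset, boolean relativeToCurrent, SeekSpot seekPosition) {
		}

		public static void main(String[] args) {
			OffsetMetadata meta = new OffsetMetadata(10L, false, SeekSpot.BEGINNING);
			System.out.println(meta.offset());  // 10
			System.out.println(meta);           // OffsetMetadata[offset=10, relativeToCurrent=false, seekPosition=BEGINNING]
			System.out.println(meta.equals(new OffsetMetadata(10L, false, SeekSpot.BEGINNING))); // true
		}
	}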