Commit 276275b

Wzy19930507 authored and sobychacko committed
Fix testInvokeRecordInterceptorAllSkipped()
* When `AckMode.RECORD` and `early` is `false`, the last method executed is `RecordInterceptor.afterRecord`; KMLCT#testInvokeRecordInterceptorAllSkipped was missing a `CountDownLatch` that waits for `RecordInterceptor.afterRecord` to complete. (cherry picked from commit 5beb8fe)
1 parent fd150ad commit 276275b
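
For context, the fix relies on a common test-synchronization pattern: a `CountDownLatch` that the asynchronous callback counts down and that the test thread awaits before verifying anything. Below is a minimal sketch of that idea outside the Spring Kafka test harness; the class and variable names are illustrative, not taken from the commit.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AfterRecordLatchSketch {

	public static void main(String[] args) throws InterruptedException {
		// One count per record the test expects the callback to see.
		CountDownLatch afterRecordLatch = new CountDownLatch(2);

		// Stand-in for RecordInterceptor.afterRecord(..), which the real test
		// overrides in a spied interceptor and which runs on the consumer thread.
		Runnable afterRecord = afterRecordLatch::countDown;

		// Simulate two records being processed asynchronously.
		for (int i = 0; i < 2; i++) {
			new Thread(afterRecord).start();
		}

		// The point of the fix: block until the callback has completed for every
		// record before stopping the container and verifying mock interactions.
		if (!afterRecordLatch.await(10, TimeUnit.SECONDS)) {
			throw new IllegalStateException("afterRecord was not invoked in time");
		}
		System.out.println("afterRecord completed for all records");
	}

}
```

In the actual test below, the latch is counted down from the spied `RecordInterceptor.afterRecord` implementation (and a second latch from the stubbed `consumer.close()`), so the `inOrder` verifications only run after the interceptor and the container shutdown have finished.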

File tree

1 file changed: +15 -5 lines changed


spring-kafka/src/test/java/org/springframework/kafka/listener/KafkaMessageListenerContainerTests.java

Lines changed: 15 additions & 5 deletions
```diff
@@ -2768,7 +2768,6 @@ public void rePausePartitionAfterRebalance() throws Exception {
 			rebal.get().onPartitionsAssigned(Set.of(tp0, tp1));
 			return null;
 		}).given(consumer).subscribe(eq(foos), any(ConsumerRebalanceListener.class));
-		final CountDownLatch resumeLatch = new CountDownLatch(1);
 		ContainerProperties containerProps = new ContainerProperties("foo");
 		containerProps.setGroupId("grp");
 		containerProps.setAckMode(AckMode.RECORD);
@@ -2779,7 +2778,6 @@ public void rePausePartitionAfterRebalance() throws Exception {
 		KafkaMessageListenerContainer<Integer, String> container =
 				new KafkaMessageListenerContainer<>(cf, containerProps);
 		container.start();
-		InOrder inOrder = inOrder(consumer);
 		assertThat(firstPoll.await(10, TimeUnit.SECONDS)).isNotNull();
 		container.pausePartition(tp0);
 		container.pausePartition(tp1);
@@ -2810,7 +2808,6 @@ public void resumePartitionAfterRevokeAndReAssign() throws Exception {
 		ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class);
 		Consumer<Integer, String> consumer = mock(Consumer.class);
 		given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer);
-		AtomicBoolean first = new AtomicBoolean(true);
 		TopicPartition tp0 = new TopicPartition("foo", 0);
 		TopicPartition tp1 = new TopicPartition("foo", 1);
 		given(consumer.assignment()).willReturn(Set.of(tp0, tp1));
@@ -3466,6 +3463,7 @@ public void testCooperativeRebalance() throws Exception {
 		containerProps.setClientId("clientId");
 		containerProps.setMessageListener((MessageListener) msg -> { });
 		Properties consumerProps = new Properties();
+		containerProps.setMessageListener((MessageListener<?, ?>) msg -> { });
@@ -3609,7 +3607,6 @@ else if (call == 1) {
 		}).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
 		List<Map<TopicPartition, OffsetAndMetadata>> commits = new ArrayList<>();
 		AtomicBoolean firstCommit = new AtomicBoolean(true);
-		AtomicInteger commitCount = new AtomicInteger();
 		willAnswer(invoc -> {
 			commits.add(invoc.getArgument(0, Map.class));
 			if (!firstCommit.getAndSet(false)) {
@@ -3891,6 +3888,11 @@ public void testInvokeRecordInterceptorAllSkipped(AckMode ackMode, boolean early
 			latch.countDown();
 			return null;
 		}).given(consumer).commitSync(any(), any());
+		CountDownLatch closeLatch = new CountDownLatch(1);
+		willAnswer(inv -> {
+			closeLatch.countDown();
+			return null;
+		}).given(consumer).close();
 		TopicPartitionOffset[] topicPartition = new TopicPartitionOffset[] {
 				new TopicPartitionOffset("foo", 0) };
 
@@ -3905,6 +3907,7 @@ public void testInvokeRecordInterceptorAllSkipped(AckMode ackMode, boolean early
 			containerProps.setTransactionManager(mock(PlatformTransactionManager.class));
 		}
 
+		CountDownLatch afterRecordLatch = new CountDownLatch(2);
 		RecordInterceptor<Integer, String> recordInterceptor = spy(new RecordInterceptor<Integer, String>() {
 
 			@Override
@@ -3915,6 +3918,10 @@ public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String>
 				return null;
 			}
 
+			public void afterRecord(ConsumerRecord<Integer, String> record, Consumer<Integer, String> consumer) {
+				afterRecordLatch.countDown();
+			}
+
 		});
 
 		KafkaMessageListenerContainer<Integer, String> container =
@@ -3923,6 +3930,9 @@ public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String>
 		container.setInterceptBeforeTx(early);
 		container.start();
 		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
+		assertThat(afterRecordLatch.await(10, TimeUnit.SECONDS)).isTrue();
+		container.stop();
+		assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
 
 		InOrder inOrder = inOrder(recordInterceptor, consumer);
 		inOrder.verify(recordInterceptor).setupThreadState(eq(consumer));
@@ -3949,7 +3959,7 @@ public ConsumerRecord<Integer, String> intercept(ConsumerRecord<Integer, String>
 			inOrder.verify(consumer).commitSync(eq(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(2L))),
 					any(Duration.class));
 		}
-		container.stop();
+		inOrder.verify(consumer).close();
 	}
 
 	@ParameterizedTest(name = "{index} testInvokeBatchInterceptorAllSkipped early intercept {0}")
```
