Skip to content

Commit b2b41d9

Browse files
authored
GH-2195: Fix Fix No Seek Retries with Pause
Related to #2195. Similar to the recent fix for when the first partition is paused, we should not continue to process the remaining records if the container itself was paused during the poll after an error occurred with `seekAfterError=false`.

**cherry-pick to 2.9.x**

* Remove unneeded copy/paste code from test.
* Fix creation of remaining records. Previously, they were all inserted into the failed record's `TopicPartition` record list.
1 parent 1eca673 commit b2b41d9

File tree

3 files changed

+239
-4
lines changed

3 files changed

+239
-4
lines changed

spring-kafka/src/main/java/org/springframework/kafka/listener/KafkaMessageListenerContainer.java

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1571,7 +1571,7 @@ private ConsumerRecords<K, V> doPoll() {
15711571
KafkaMessageListenerContainer.this.emergencyStop.run();
15721572
}
15731573
TopicPartition firstPart = this.pendingRecordsAfterError.partitions().iterator().next();
1574-
boolean isPaused = isPartitionPauseRequested(firstPart);
1574+
boolean isPaused = isPaused() || isPartitionPauseRequested(firstPart);
15751575
this.logger.debug(() -> "First pending after error: " + firstPart + "; paused: " + isPaused);
15761576
if (!isPaused) {
15771577
records = this.pendingRecordsAfterError;
@@ -2782,8 +2782,9 @@ private void invokeErrorHandler(final ConsumerRecord<K, V> record,
27822782
tp -> new ArrayList<ConsumerRecord<K, V>>()).add(record);
27832783
}
27842784
while (iterator.hasNext()) {
2785-
records.computeIfAbsent(new TopicPartition(record.topic(), record.partition()),
2786-
tp -> new ArrayList<ConsumerRecord<K, V>>()).add(iterator.next());
2785+
ConsumerRecord<K, V> next = iterator.next();
2786+
records.computeIfAbsent(new TopicPartition(next.topic(), next.partition()),
2787+
tp -> new ArrayList<ConsumerRecord<K, V>>()).add(next);
27872788
}
27882789
if (records.size() > 0) {
27892790
this.pendingRecordsAfterError = new ConsumerRecords<>(records);
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,234 @@
/*
 * Copyright 2022 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.kafka.listener;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.BDDMockito.given;
import static org.mockito.BDDMockito.willAnswer;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;

/**
 * Tests GH-2195: with {@code seekAfterError=false}, records left pending after a
 * listener error must NOT be redelivered (and partitions must NOT be resumed) when
 * the <em>container itself</em> was paused during the poll that followed the error.
 *
 * @author Gary Russell
 * @since 2.9
 *
 */
@SpringJUnitConfig
@DirtiesContext
public class DefaultErrorHandlerNoSeeksRecordAckNoResumeContainerPausedTests {

	@SuppressWarnings("rawtypes")
	@Autowired
	private Consumer consumer;

	@Autowired
	private Config config;

	@Autowired
	private KafkaListenerEndpointRegistry registry;

	/*
	 * The listener fails on the 4th record ("qux", partition 1, offset 1) and the
	 * commitSync answer pauses the whole container. Because seekAfterError=false,
	 * the container would normally redeliver the pending records on the next poll;
	 * since the container is paused, it must not, and resume() must never be called.
	 */
	@SuppressWarnings("unchecked")
	@Test
	public void doesNotResumeIfContainerPaused() throws Exception {
		assertThat(this.config.deliveryLatch.await(10, TimeUnit.SECONDS)).isTrue();
		assertThat(this.config.commitLatch.await(10, TimeUnit.SECONDS)).isTrue();
		assertThat(this.config.pollLatch.await(10, TimeUnit.SECONDS)).isTrue();
		this.registry.stop();
		assertThat(this.config.closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
		InOrder inOrder = inOrder(this.consumer);
		inOrder.verify(this.consumer).assign(any(Collection.class));
		inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
		// Record ack mode: one commit per successfully processed record.
		inOrder.verify(this.consumer).commitSync(
				Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(1L)),
				Duration.ofSeconds(60));
		inOrder.verify(this.consumer).commitSync(
				Collections.singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(2L)),
				Duration.ofSeconds(60));
		inOrder.verify(this.consumer).commitSync(
				Collections.singletonMap(new TopicPartition("foo", 1), new OffsetAndMetadata(1L)),
				Duration.ofSeconds(60));
		inOrder.verify(this.consumer).pause(any());
		inOrder.verify(this.consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
		// The container is paused, so the pending records must not trigger a resume.
		verify(this.consumer, never()).resume(any());
		// 4 records delivered exactly once each (no redelivery of "qux").
		assertThat(this.config.count).isEqualTo(4);
		assertThat(this.config.contents).contains("foo", "bar", "baz", "qux");
		assertThat(this.config.deliveries).contains(1, 1, 1, 1);
		// seekAfterError=false: no seeks performed by the error handler.
		verify(this.consumer, never()).seek(any(), anyLong());
	}

	@Configuration
	@EnableKafka
	public static class Config {

		final List<String> contents = new ArrayList<>();

		final List<Integer> deliveries = new ArrayList<>();

		final CountDownLatch pollLatch = new CountDownLatch(4);

		final CountDownLatch deliveryLatch = new CountDownLatch(4);

		final CountDownLatch closeLatch = new CountDownLatch(1);

		final CountDownLatch commitLatch = new CountDownLatch(3);

		int count;

		@KafkaListener(id = "id", groupId = "grp",
				topicPartitions = @org.springframework.kafka.annotation.TopicPartition(topic = "foo",
						partitions = "#{'0,1,2'.split(',')}"))
		public void foo(String in, @Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery) {
			this.contents.add(in);
			this.deliveries.add(delivery);
			this.deliveryLatch.countDown();
			if (++this.count == 4 || this.count == 5) { // part 1, offset 1, first and second times
				throw new RuntimeException("foo");
			}
		}

		@SuppressWarnings({ "rawtypes" })
		@Bean
		public ConsumerFactory consumerFactory(KafkaListenerEndpointRegistry registry) {
			ConsumerFactory consumerFactory = mock(ConsumerFactory.class);
			final Consumer consumer = consumer(registry);
			given(consumerFactory.createConsumer("grp", "", "-0", KafkaTestUtils.defaultPropertyOverrides()))
					.willReturn(consumer);
			return consumerFactory;
		}

		/*
		 * Mock consumer: the first poll returns two records for each of partitions
		 * 0, 1 and 2; subsequent polls return nothing. Each commitSync pauses the
		 * container, so by the time the error handler's pending records are
		 * reconsidered, the container is paused.
		 */
		@SuppressWarnings({ "rawtypes", "unchecked" })
		@Bean
		public Consumer consumer(KafkaListenerEndpointRegistry registry) {
			final Consumer consumer = mock(Consumer.class);
			final TopicPartition topicPartition0 = new TopicPartition("foo", 0);
			final TopicPartition topicPartition1 = new TopicPartition("foo", 1);
			final TopicPartition topicPartition2 = new TopicPartition("foo", 2);
			Map<TopicPartition, List<ConsumerRecord>> records1 = new LinkedHashMap<>();
			records1.put(topicPartition0, Arrays.asList(
					new ConsumerRecord("foo", 0, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "foo",
							new RecordHeaders(), Optional.empty()),
					new ConsumerRecord("foo", 0, 1L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "bar",
							new RecordHeaders(), Optional.empty())));
			records1.put(topicPartition1, Arrays.asList(
					new ConsumerRecord("foo", 1, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "baz",
							new RecordHeaders(), Optional.empty()),
					new ConsumerRecord("foo", 1, 1L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "qux",
							new RecordHeaders(), Optional.empty())));
			records1.put(topicPartition2, Arrays.asList(
					new ConsumerRecord("foo", 2, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "fiz",
							new RecordHeaders(), Optional.empty()),
					new ConsumerRecord("foo", 2, 1L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, null, "buz",
							new RecordHeaders(), Optional.empty())));
			final AtomicInteger which = new AtomicInteger();
			willAnswer(i -> {
				this.pollLatch.countDown();
				switch (which.getAndIncrement()) {
					case 0:
						return new ConsumerRecords(records1);
					default:
						try {
							Thread.sleep(50);
						}
						catch (InterruptedException e) {
							Thread.currentThread().interrupt();
						}
						return new ConsumerRecords(Collections.emptyMap());
				}
			}).given(consumer).poll(Duration.ofMillis(ContainerProperties.DEFAULT_POLL_TIMEOUT));
			List<TopicPartition> paused = new ArrayList<>();
			willAnswer(i -> {
				this.commitLatch.countDown();
				// Pause the whole container; the post-error poll must then skip
				// the pending records instead of replaying them.
				registry.getListenerContainer("id").pause();
				return null;
			}).given(consumer).commitSync(anyMap(), any());
			willAnswer(i -> {
				this.closeLatch.countDown();
				return null;
			}).given(consumer).close();
			willAnswer(i -> {
				paused.addAll(i.getArgument(0));
				return null;
			}).given(consumer).pause(any());
			willAnswer(i -> {
				return new HashSet<>(paused);
			}).given(consumer).paused();
			willAnswer(i -> {
				paused.removeAll(i.getArgument(0));
				return null;
			}).given(consumer).resume(any());
			return consumer;
		}

		@SuppressWarnings({ "rawtypes", "unchecked" })
		@Bean
		ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory(KafkaListenerEndpointRegistry registry) {
			ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
			factory.setConsumerFactory(consumerFactory(registry));
			factory.getContainerProperties().setAckMode(AckMode.RECORD);
			DefaultErrorHandler eh = new DefaultErrorHandler();
			// The behavior under test: do not seek back on error; the container
			// retains the unprocessed records in memory instead.
			eh.setSeekAfterError(false);
			factory.setCommonErrorHandler(eh);
			return factory;
		}

	}

}
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@
7373
*/
7474
@SpringJUnitConfig
7575
@DirtiesContext
76-
public class DefaultErrorHandlerNoSeeksRecordAckNoResumeTests {
76+
public class DefaultErrorHandlerNoSeeksRecordAckNoResumePartitionTests {
7777

7878
@SuppressWarnings("rawtypes")
7979
@Autowired

0 commit comments

Comments
 (0)