import static com.rabbitmq.stream.impl.TestUtils.deleteSuperStreamTopology;
import static com.rabbitmq.stream.impl.TestUtils.latchAssert;
import static com.rabbitmq.stream.impl.TestUtils.localhost;
+ import static com.rabbitmq.stream.impl.TestUtils.waitAtMost;
+ import static com.rabbitmq.stream.impl.TestUtils.wrap;
import static org.assertj.core.api.Assertions.assertThat;

import com.rabbitmq.client.Connection;
import com.rabbitmq.stream.EnvironmentBuilder;
import com.rabbitmq.stream.OffsetSpecification;
import com.rabbitmq.stream.impl.Client.ClientParameters;
+ import com.rabbitmq.stream.impl.Client.QueryOffsetResponse;
import io.netty.channel.EventLoopGroup;
import java.nio.charset.StandardCharsets;
+ import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
@@ -174,9 +178,14 @@ void manualOffsetTrackingShouldStoreOnAllPartitions() throws Exception {
    // offset near the end (the message count per partition minus a few messages)
    long almostLastOffset = messageCount / partitionCount - messageCount / (partitionCount * 10);
    partitions.forEach(
-        p ->
-            assertThat(client.queryOffset(consumerName, p).getOffset())
-                .isGreaterThan(almostLastOffset));
+        wrap(
+            p -> {
+              waitAtMost(
+                  () -> {
+                    QueryOffsetResponse response = client.queryOffset(consumerName, p);
+                    return response.isOk() && response.getOffset() > almostLastOffset;
+                  });
+            }));
    consumer.close();
  }
@@ -211,9 +220,6 @@ void autoOffsetTrackingShouldStoreOnAllPartitions() throws Exception {
              messagesReceived.get(partition).incrementAndGet();
              lastOffsets.put(partition, context.offset());
              totalCount.incrementAndGet();
-              if (totalCount.get() % 50 == 0) {
-                context.storeOffset();
-              }
              consumeLatch.countDown();
            })
        .build();
@@ -228,9 +234,69 @@ void autoOffsetTrackingShouldStoreOnAllPartitions() throws Exception {
    // offset near the end (the message count per partition minus a few messages)
    long almostLastOffset = messageCount / partitionCount - messageCount / (partitionCount * 10);
    partitions.forEach(
-        p ->
-            assertThat(client.queryOffset(consumerName, p).getOffset())
-                .isGreaterThan(almostLastOffset));
+        wrap(
+            p -> {
+              waitAtMost(
+                  () -> {
+                    QueryOffsetResponse response = client.queryOffset(consumerName, p);
+                    return response.isOk() && response.getOffset() > almostLastOffset;
+                  });
+            }));
    consumer.close();
  }
+
+  @Test
+  void autoOffsetTrackingShouldStoreOffsetZero() throws Exception {
+    declareSuperStreamTopology(connection, superStream, partitionCount);
+    Client client = cf.get();
+    List<String> partitions = client.partitions(superStream);
+    int messageCount = partitionCount;
+    publishToPartitions(cf, partitions, messageCount);
+    ConcurrentMap<String, AtomicInteger> messagesReceived = new ConcurrentHashMap<>(partitionCount);
+    ConcurrentMap<String, Long> lastOffsets = new ConcurrentHashMap<>(partitionCount);
+    partitions.forEach(
+        p -> {
+          messagesReceived.put(p, new AtomicInteger(0));
+        });
+    CountDownLatch consumeLatch = new CountDownLatch(messageCount);
+    String consumerName = "my-app";
+    AtomicInteger totalCount = new AtomicInteger();
+    Consumer consumer =
+        environment
+            .consumerBuilder()
+            .superStream(superStream)
+            .offset(OffsetSpecification.first())
+            .name(consumerName)
+            .autoTrackingStrategy()
+            .flushInterval(Duration.ofHours(1)) // long enough
+            .builder()
+            .messageHandler(
+                (context, message) -> {
+                  String partition = new String(message.getBodyAsBinary());
+                  messagesReceived.get(partition).incrementAndGet();
+                  lastOffsets.put(partition, context.offset());
+                  totalCount.incrementAndGet();
+                  consumeLatch.countDown();
+                })
+            .build();
+    latchAssert(consumeLatch).completes();
+    assertThat(messagesReceived).hasSize(partitionCount);
+    partitions.forEach(
+        p -> {
+          assertThat(messagesReceived).containsKey(p);
+          assertThat(messagesReceived.get(p).get()).isEqualTo(messageCount / partitionCount);
+        });
+    consumer.close();
+    partitions.forEach(
+        wrap(
+            p -> {
+              assertThat(lastOffsets.get(p)).isZero();
+              waitAtMost(
+                  () -> {
+                    QueryOffsetResponse response = client.queryOffset(consumerName, p);
+                    return response.isOk()
+                        && response.getOffset() == lastOffsets.get(p).longValue();
+                  });
+            }));
+  }
}
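
The change replaces the one-shot assertions on the stored offset with a poll-and-retry check via TestUtils.waitAtMost(...) wrapped by TestUtils.wrap(...), since offset storage on the broker completes asynchronously. The actual TestUtils helpers are not part of this diff; the sketch below is only an illustration of the retry-until-true idea, with hypothetical names and signatures.

```java
import java.time.Duration;
import java.util.function.Consumer;

// Illustration only: minimal helpers in the spirit of the waitAtMost(...)/wrap(...)
// calls used above; the real TestUtils implementation may differ.
final class PollingSupport {

  @FunctionalInterface
  interface CallableBooleanSupplier {
    boolean getAsBoolean() throws Exception;
  }

  @FunctionalInterface
  interface CallableConsumer<T> {
    void accept(T t) throws Exception;
  }

  // Polls the condition every 100 ms until it returns true or the timeout expires.
  static void waitAtMost(Duration timeout, CallableBooleanSupplier condition) throws Exception {
    long deadline = System.nanoTime() + timeout.toNanos();
    while (System.nanoTime() < deadline) {
      if (condition.getAsBoolean()) {
        return;
      }
      Thread.sleep(100);
    }
    throw new AssertionError("Condition not met within " + timeout);
  }

  // Adapts a throwing consumer to java.util.function.Consumer so it can be used
  // in forEach(...) without checked-exception boilerplate.
  static <T> Consumer<T> wrap(CallableConsumer<T> action) {
    return t -> {
      try {
        action.accept(t);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    };
  }

  private PollingSupport() {}
}
```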