17 | 17 | import static com.rabbitmq.stream.impl.TestUtils.waitAtMost;
18 | 18 | import static org.assertj.core.api.Assertions.assertThat;
19 | 19 |
20 |    | -import com.rabbitmq.stream.*;
   | 20 | +import com.rabbitmq.stream.BackOffDelayPolicy;
   | 21 | +import com.rabbitmq.stream.ConfirmationHandler;
   | 22 | +import com.rabbitmq.stream.ConfirmationStatus;
   | 23 | +import com.rabbitmq.stream.Constants;
   | 24 | +import com.rabbitmq.stream.Environment;
   | 25 | +import com.rabbitmq.stream.EnvironmentBuilder;
   | 26 | +import com.rabbitmq.stream.Host;
   | 27 | +import com.rabbitmq.stream.Producer;
   | 28 | +import com.rabbitmq.stream.StreamException;
21 | 29 | import com.rabbitmq.stream.impl.StreamProducer.Status;
22 | 30 | import io.netty.channel.EventLoopGroup;
23 | 31 | import java.nio.charset.StandardCharsets;
24 | 32 | import java.time.Duration;
25 | 33 | import java.util.List;
26 | 34 | import java.util.Map;
   | 35 | +import java.util.SortedSet;
   | 36 | +import java.util.TreeSet;
27 | 37 | import java.util.UUID;
28 | 38 | import java.util.concurrent.ConcurrentHashMap;
29 | 39 | import java.util.concurrent.CountDownLatch;
34 | 44 | import java.util.concurrent.atomic.AtomicInteger;
35 | 45 | import java.util.concurrent.atomic.AtomicLong;
36 | 46 | import java.util.concurrent.atomic.AtomicReference;
   | 47 | +import java.util.function.Consumer;
37 | 48 | import java.util.stream.Collectors;
38 | 49 | import java.util.stream.IntStream;
39 | 50 | import org.junit.jupiter.api.AfterEach;

@@ -289,4 +300,134 @@ void shouldRecoverAfterConnectionIsKilled(int subEntrySize) throws Exception {
289 | 300 |         .build();
290 | 301 |     assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
291 | 302 |   }
    | 303 | +
    | 304 | +  @ParameterizedTest
    | 305 | +  @ValueSource(ints = {1, 7})
    | 306 | +  void messagesShouldBeDeDuplicatedWhenUsingNameAndPublishingId(int subEntrySize) throws Exception {
    | 307 | +    int lineCount = 50_000;
    | 308 | +    int firstWaveLineCount = lineCount / 5;
    | 309 | +    int backwardCount = firstWaveLineCount / 10;
    | 310 | +    SortedSet<Integer> document = new TreeSet<>();
    | 311 | +    IntStream.range(0, lineCount).forEach(i -> document.add(i));
    | 312 | +    Producer producer =
    | 313 | +        environment.producerBuilder().name("producer-1").stream(stream)
    | 314 | +            .subEntrySize(subEntrySize)
    | 315 | +            .build();
    | 316 | +
    | 317 | +    AtomicReference<CountDownLatch> latch =
    | 318 | +        new AtomicReference<>(new CountDownLatch(firstWaveLineCount));
    | 319 | +    ConfirmationHandler confirmationHandler = confirmationStatus -> latch.get().countDown();
    | 320 | +    Consumer<Integer> publishMessage =
    | 321 | +        i ->
    | 322 | +            producer.send(
    | 323 | +                producer
    | 324 | +                    .messageBuilder()
    | 325 | +                    .publishingId(i)
    | 326 | +                    .addData(String.valueOf(i).getBytes())
    | 327 | +                    .build(),
    | 328 | +                confirmationHandler);
    | 329 | +    document.headSet(firstWaveLineCount).forEach(publishMessage);
    | 330 | +
    | 331 | +    assertThat(latch.get().await(10, TimeUnit.SECONDS)).isTrue();
    | 332 | +
    | 333 | +    latch.set(new CountDownLatch(lineCount - firstWaveLineCount + backwardCount));
    | 334 | +
    | 335 | +    document.tailSet(firstWaveLineCount - backwardCount).forEach(publishMessage);
    | 336 | +
    | 337 | +    assertThat(latch.get().await(5, TimeUnit.SECONDS)).isTrue();
    | 338 | +
    | 339 | +    CountDownLatch consumeLatch = new CountDownLatch(lineCount);
    | 340 | +    AtomicInteger consumed = new AtomicInteger();
    | 341 | +    environment.consumerBuilder().stream(stream)
    | 342 | +        .messageHandler(
    | 343 | +            (offset, message) -> {
    | 344 | +              consumed.incrementAndGet();
    | 345 | +              consumeLatch.countDown();
    | 346 | +            })
    | 347 | +        .build();
    | 348 | +    assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    | 349 | +    Thread.sleep(1000);
    | 350 | +    // when using sub-entries, duplicates cannot be avoided entirely: the sub-entry
    | 351 | +    // that straddles the end of the re-submitted values gets the publishing ID of
    | 352 | +    // its last (new) message, so the server accepts the whole sub-entry, including
    | 353 | +    // the backwardCount % subEntrySize duplicates it contains, hence the expected
    | 354 | +    // count below.
    | 355 | +    assertThat(consumed.get()).isEqualTo(lineCount + backwardCount % subEntrySize);
    | 356 | +  }
    | 357 | +
    | 358 | +  @ParameterizedTest
    | 359 | +  @ValueSource(ints = {1, 7})
    | 360 | +  void newIncarnationOfProducerCanQueryItsLastPublishingId(int subEntrySize) throws Exception {
    | 361 | +    Producer p =
    | 362 | +        environment.producerBuilder().name("producer-1").stream(stream)
    | 363 | +            .subEntrySize(subEntrySize)
    | 364 | +            .build();
    | 365 | +
    | 366 | +    AtomicReference<Producer> producer = new AtomicReference<>(p);
    | 367 | +
    | 368 | +    AtomicLong publishingSequence = new AtomicLong(0);
    | 369 | +    AtomicLong lastConfirmed = new AtomicLong(-1);
    | 370 | +    ConfirmationHandler confirmationHandler =
    | 371 | +        confirmationStatus -> {
    | 372 | +          if (confirmationStatus.isConfirmed()) {
    | 373 | +            lastConfirmed.set(confirmationStatus.getMessage().getPublishingId());
    | 374 | +          }
    | 375 | +        };
    | 376 | +
    | 377 | +    AtomicBoolean canPublish = new AtomicBoolean(true);
    | 378 | +    Runnable publish =
    | 379 | +        () -> {
    | 380 | +          while (canPublish.get()) {
    | 381 | +            producer
    | 382 | +                .get()
    | 383 | +                .send(
    | 384 | +                    producer
    | 385 | +                        .get()
    | 386 | +                        .messageBuilder()
    | 387 | +                        .publishingId(publishingSequence.getAndIncrement())
    | 388 | +                        .addData(String.valueOf(publishingSequence.get()).getBytes())
    | 389 | +                        .build(),
    | 390 | +                    confirmationHandler);
    | 391 | +          }
    | 392 | +        };
    | 393 | +    new Thread(publish).start();
    | 394 | +
    | 395 | +    Thread.sleep(1000L);
    | 396 | +    canPublish.set(false);
    | 397 | +    waitAtMost(10, () -> publishingSequence.get() == lastConfirmed.get() + 1);
    | 398 | +    assertThat(lastConfirmed.get()).isPositive();
    | 399 | +
    | 400 | +    producer.get().close();
    | 401 | +
    | 402 | +    p =
    | 403 | +        environment.producerBuilder().name("producer-1").stream(stream)
    | 404 | +            .subEntrySize(subEntrySize)
    | 405 | +            .build();
    | 406 | +    producer.set(p);
    | 407 | +
    | 408 | +    long lastPublishingId = producer.get().getLastPublishingId();
    | 409 | +    assertThat(lastPublishingId).isEqualTo(lastConfirmed.get());
    | 410 | +
    | 411 | +    canPublish.set(true);
    | 412 | +    new Thread(publish).start();
    | 413 | +
    | 414 | +    Thread.sleep(1000L);
    | 415 | +    canPublish.set(false);
    | 416 | +
    | 417 | +    waitAtMost(10, () -> publishingSequence.get() == lastConfirmed.get() + 1);
    | 418 | +    assertThat(lastConfirmed.get()).isGreaterThan(lastPublishingId);
    | 419 | +
    | 420 | +    CountDownLatch consumeLatch = new CountDownLatch((int) (lastConfirmed.get() + 1));
    | 421 | +    AtomicInteger consumed = new AtomicInteger();
    | 422 | +    environment.consumerBuilder().stream(stream)
    | 423 | +        .messageHandler(
    | 424 | +            (offset, message) -> {
    | 425 | +              consumed.incrementAndGet();
    | 426 | +              consumeLatch.countDown();
    | 427 | +            })
    | 428 | +        .build();
    | 429 | +    assertThat(consumeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    | 430 | +    Thread.sleep(1000);
    | 431 | +    assertThat(consumed.get()).isEqualTo(lastConfirmed.get() + 1);
    | 432 | +  }
292 | 433 | }
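
Taken together, the two new tests document the intended usage pattern: a producer with a stable name and monotonically increasing publishing IDs gets broker-side de-duplication, and a restarted incarnation can call `getLastPublishingId()` to find out where to resume. The following is a minimal sketch of that pattern using only the API calls exercised above; the `ResumablePublisher` class, the `publishFrom` helper, and its error handling are made up for illustration, the `Environment` and stream are assumed to exist already, and the very first run (no previously confirmed IDs) is glossed over.

```java
import com.rabbitmq.stream.ConfirmationHandler;
import com.rabbitmq.stream.Environment;
import com.rabbitmq.stream.Producer;
import java.nio.charset.StandardCharsets;
import java.util.List;

public class ResumablePublisher {

  // Hypothetical helper: (re-)publishes the lines of a document without creating
  // duplicates, relying on the named producer's de-duplication.
  static void publishFrom(Environment environment, String stream, List<String> lines)
      throws Exception {
    // the name must be stable across restarts: the broker tracks the highest
    // publishing ID it has confirmed for this (name, stream) pair
    Producer producer =
        environment.producerBuilder().name("producer-1").stream(stream).build();

    // where a previous incarnation stopped; entries up to this ID are already stored
    long lastPublishingId = producer.getLastPublishingId();

    ConfirmationHandler confirmationHandler =
        status -> {
          if (!status.isConfirmed()) {
            // a real application would log and/or retry here
          }
        };

    for (int i = 0; i < lines.size(); i++) {
      if (i <= lastPublishingId) {
        continue; // confirmed by a previous incarnation, the broker would drop it anyway
      }
      producer.send(
          producer
              .messageBuilder()
              .publishingId(i) // must grow monotonically for de-duplication to work
              .addData(lines.get(i).getBytes(StandardCharsets.UTF_8))
              .build(),
          confirmationHandler);
    }
    producer.close();
  }
}
```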