From 40052ef3a87e7af9b81f884bbf585a18310d279e Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 21 Apr 2021 10:36:46 +0200 Subject: [PATCH 1/6] Prepare issue branch. --- pom.xml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 0d315ea58e..3c46c5facb 100644 --- a/pom.xml +++ b/pom.xml @@ -1,11 +1,13 @@ - + 4.0.0 org.springframework.data spring-data-redis - 2.6.0-SNAPSHOT + 2.6.0-GH-1721-SNAPSHOT Spring Data Redis From fc083d0f473f8f5271fe60e936fb108459133284 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 21 Apr 2021 10:38:07 +0200 Subject: [PATCH 2/6] =?UTF-8?q?Add=20ScanOptions.pattern(=E2=80=A6)=20acce?= =?UTF-8?q?pting=20a=20byte[]=20pattern.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now accept a byte array as pattern for scan operations in addition to String patterns. Also, use binary Jedis scan method to avoid bytes to String conversion. Closes #2006 --- .../connection/jedis/JedisConverters.java | 5 ++- .../connection/jedis/JedisKeyCommands.java | 5 ++- .../connection/lettuce/LettuceConverters.java | 5 ++- .../data/redis/core/ScanOptions.java | 39 +++++++++++++++++-- .../jedis/JedisConnectionUnitTests.java | 8 ++-- 5 files changed, 48 insertions(+), 14 deletions(-) diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisConverters.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisConverters.java index 18e7f2662a..1498b3f902 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisConverters.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisConverters.java @@ -529,8 +529,9 @@ public static ScanParams toScanParams(ScanOptions options) { if (options.getCount() != null) { sp.count(options.getCount().intValue()); } - if (StringUtils.hasText(options.getPattern())) { - sp.match(options.getPattern()); + byte[] pattern = options.getBytePattern(); + if (pattern != null) { + sp.match(pattern); } } return sp; diff --git a/src/main/java/org/springframework/data/redis/connection/jedis/JedisKeyCommands.java b/src/main/java/org/springframework/data/redis/connection/jedis/JedisKeyCommands.java index 969727ce94..cf1e50192e 100644 --- a/src/main/java/org/springframework/data/redis/connection/jedis/JedisKeyCommands.java +++ b/src/main/java/org/springframework/data/redis/connection/jedis/JedisKeyCommands.java @@ -168,9 +168,10 @@ protected ScanIteration doScan(long cursorId, ScanOptions options) { } ScanParams params = JedisConverters.toScanParams(options); - redis.clients.jedis.ScanResult result = connection.getJedis().scan(Long.toString(cursorId), params); + redis.clients.jedis.ScanResult result = connection.getJedis().scan(Long.toString(cursorId).getBytes(), + params); return new ScanIteration<>(Long.parseLong(result.getCursor()), - JedisConverters.stringListToByteList().convert(result.getResult())); + result.getResult()); } protected void doClose() { diff --git a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConverters.java b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConverters.java index 9d98798050..b61cb7a9ae 100644 --- a/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConverters.java +++ b/src/main/java/org/springframework/data/redis/connection/lettuce/LettuceConverters.java @@ -868,8 +868,9 @@ static ScanArgs toScanArgs(@Nullable ScanOptions options) { ScanArgs scanArgs = new ScanArgs(); - if (options.getPattern() != 
null) { - scanArgs.match(options.getPattern()); + byte[] pattern = options.getBytePattern(); + if (pattern != null) { + scanArgs.match(pattern); } if (options.getCount() != null) { diff --git a/src/main/java/org/springframework/data/redis/core/ScanOptions.java b/src/main/java/org/springframework/data/redis/core/ScanOptions.java index a80aa80b5c..56a8973928 100644 --- a/src/main/java/org/springframework/data/redis/core/ScanOptions.java +++ b/src/main/java/org/springframework/data/redis/core/ScanOptions.java @@ -31,14 +31,16 @@ public class ScanOptions { /** * Constant to apply default {@link ScanOptions} without setting a limit or matching a pattern. */ - public static ScanOptions NONE = new ScanOptions(null, null); + public static ScanOptions NONE = new ScanOptions(null, null, null); private final @Nullable Long count; private final @Nullable String pattern; + private final @Nullable byte[] bytePattern; - private ScanOptions(@Nullable Long count, @Nullable String pattern) { + private ScanOptions(@Nullable Long count, @Nullable String pattern, @Nullable byte[] bytePattern) { this.count = count; this.pattern = pattern; + this.bytePattern = bytePattern; } /** @@ -57,9 +59,24 @@ public Long getCount() { @Nullable public String getPattern() { + + if (bytePattern != null && pattern == null) { + return new String(bytePattern); + } + return pattern; } + @Nullable + public byte[] getBytePattern() { + + if (bytePattern == null && pattern != null) { + return pattern.getBytes(); + } + + return bytePattern; + } + public String toOptionString() { if (this.equals(ScanOptions.NONE)) { @@ -71,7 +88,8 @@ public String toOptionString() { if (this.count != null) { params += (", 'count', " + count); } - if (StringUtils.hasText(this.pattern)) { + String pattern = getPattern(); + if (StringUtils.hasText(pattern)) { params += (", 'match' , '" + this.pattern + "'"); } @@ -87,6 +105,7 @@ public static class ScanOptionsBuilder { private @Nullable Long count; private @Nullable String pattern; + private @Nullable byte[] bytePattern; /** @@ -111,13 +130,25 @@ public ScanOptionsBuilder match(String pattern) { return this; } + /** + * Returns the current {@link ScanOptionsBuilder} configured with the given {@code pattern}. + * + * @param pattern + * @return + * @since 2.6 + */ + public ScanOptionsBuilder match(byte[] pattern) { + this.bytePattern = pattern; + return this; + } + /** * Builds a new {@link ScanOptions} objects. * * @return a new {@link ScanOptions} objects. */ public ScanOptions build() { - return new ScanOptions(count, pattern); + return new ScanOptions(count, pattern, bytePattern); } } } diff --git a/src/test/java/org/springframework/data/redis/connection/jedis/JedisConnectionUnitTests.java b/src/test/java/org/springframework/data/redis/connection/jedis/JedisConnectionUnitTests.java index d8533aba12..204c6f2da7 100644 --- a/src/test/java/org/springframework/data/redis/connection/jedis/JedisConnectionUnitTests.java +++ b/src/test/java/org/springframework/data/redis/connection/jedis/JedisConnectionUnitTests.java @@ -157,10 +157,10 @@ void zRangeByScoreShouldThrowExceptionWhenCountExceedsIntegerRange() { Integer.MAX_VALUE, (long) Integer.MAX_VALUE + 1L)); } - @Test // DATAREDIS-531 + @Test // DATAREDIS-531, GH-2006 public void scanShouldKeepTheConnectionOpen() { - doReturn(new ScanResult<>("0", Collections. emptyList())).when(jedisSpy).scan(anyString(), + doReturn(new ScanResult<>("0", Collections. 
emptyList())).when(jedisSpy).scan(any(byte[].class), any(ScanParams.class)); connection.scan(ScanOptions.NONE); @@ -168,10 +168,10 @@ public void scanShouldKeepTheConnectionOpen() { verify(jedisSpy, never()).quit(); } - @Test // DATAREDIS-531 + @Test // DATAREDIS-531, GH-2006 public void scanShouldCloseTheConnectionWhenCursorIsClosed() throws IOException { - doReturn(new ScanResult<>("0", Collections. emptyList())).when(jedisSpy).scan(anyString(), + doReturn(new ScanResult<>("0", Collections. emptyList())).when(jedisSpy).scan(any(byte[].class), any(ScanParams.class)); Cursor cursor = connection.scan(ScanOptions.NONE); From 44f78c285cd7f048ba0d901dff809194abce5a8c Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 21 Apr 2021 10:58:09 +0200 Subject: [PATCH 3/6] Add support for configurable batch strategies using RedisCache. We now support a configurable BatchStrategy for RedisCache. The implementations consist of KEYS (default) and SCAN. Since SCAN is not supported with Jedis Cluster and SCAN requires a batch size, we default to KEYS. Closes #1721. --- .../data/redis/cache/BatchStrategy.java | 170 ++++++++++++++++++ .../redis/cache/DefaultRedisCacheWriter.java | 28 +-- .../data/redis/cache/RedisCacheManager.java | 4 +- .../data/redis/cache/RedisCacheWriter.java | 34 +++- .../cache/DefaultRedisCacheWriterTests.java | 3 +- .../redis/cache/LegacyRedisCacheTests.java | 3 +- .../data/redis/cache/RedisCacheTests.java | 40 ++++- 7 files changed, 258 insertions(+), 24 deletions(-) create mode 100644 src/main/java/org/springframework/data/redis/cache/BatchStrategy.java diff --git a/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java new file mode 100644 index 0000000000..5bf25537e7 --- /dev/null +++ b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java @@ -0,0 +1,170 @@ +/* + * Copyright 2021 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.cache; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Optional; + +import org.springframework.data.redis.connection.RedisConnection; +import org.springframework.data.redis.core.Cursor; +import org.springframework.data.redis.core.ScanOptions; +import org.springframework.util.Assert; + +/** + * Batch strategies to be used with {@link RedisCacheWriter}. + *

+ * Primarily used to clear the cache. + * + * @author Mark Paluch + * @since 2.6 + */ +public abstract class BatchStrategy { + + /** + * Batching strategy using a single {@code KEYS} and {@code DEL} command to remove all matching keys. {@code KEYS} + * scans the entire keyspace of the Redis database and can block the Redis worker thread for a long time when the + * keyspace has a significant size. + *

+ * {@code KEYS} is supported for standalone and clustered (sharded) Redis operation modes. + * + * @return batching strategy using {@code KEYS}. + */ + public static BatchStrategy keys() { + return Keys.INSTANCE; + } + + /** + * Batching strategy using a {@code SCAN} cursors and potentially multiple {@code DEL} commands to remove all matching + * keys. This strategy allows a configurable batch size to optimize for scan batching. + *

+ * Note that using the {@code SCAN} strategy might be not supported on all drivers and Redis operation modes. + * + * @return batching strategy using {@code SCAN}. + */ + public static BatchStrategy scan(int batchSize) { + + Assert.isTrue(batchSize > 0, "Batch size must be greater than zero"); + + return new Scan(batchSize); + } + + /** + * Remove all keys following the given pattern. + * + * @param the connection to use. + * @param name The cache name must not be {@literal null}. + * @param pattern The pattern for the keys to remove. Must not be {@literal null}. + * @return number of removed keys. + */ + abstract int cleanCache(RedisConnection connection, String name, byte[] pattern); + + /** + * {@link BatchStrategy} using {@code KEYS}. + */ + static class Keys extends BatchStrategy { + + static Keys INSTANCE = new Keys(); + + @Override + int cleanCache(RedisConnection connection, String name, byte[] pattern) { + + byte[][] keys = Optional.ofNullable(connection.keys(pattern)).orElse(Collections.emptySet()) + .toArray(new byte[0][]); + + if (keys.length > 0) { + connection.del(keys); + } + + return keys.length; + } + } + + /** + * {@link BatchStrategy} using {@code SCAN}. + */ + static class Scan extends BatchStrategy { + + private final int batchSize; + + public Scan(int batchSize) { + this.batchSize = batchSize; + } + + @Override + int cleanCache(RedisConnection connection, String name, byte[] pattern) { + + Cursor cursor = connection.scan(ScanOptions.scanOptions().count(batchSize).match(pattern).build()); + + PartitionIterator partitions = new PartitionIterator<>(cursor, batchSize); + + int count = 0; + + while (partitions.hasNext()) { + + List keys = partitions.next(); + count += keys.size(); + + if (keys.size() > 0) { + connection.del(keys.toArray(new byte[0][])); + } + } + + return count; + } + } + + /** + * Utility to split and buffer outcome from a {@link Iterator} into {@link List lists} of {@code T} with a maximum + * chunks {@code size}. + * + * @param + */ + static class PartitionIterator implements Iterator> { + + private final Iterator iterator; + private final int size; + + public PartitionIterator(Iterator iterator, int size) { + this.iterator = iterator; + this.size = size; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public List next() { + + if (!hasNext()) { + throw new NoSuchElementException(); + } + + List list = new ArrayList<>(size); + while (list.size() < size && iterator.hasNext()) { + list.add(iterator.next()); + } + + return list; + } + } + +} diff --git a/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java b/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java index 61530140c1..6cfe7b950b 100644 --- a/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java +++ b/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java @@ -17,8 +17,6 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.util.Collections; -import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; @@ -53,21 +51,24 @@ class DefaultRedisCacheWriter implements RedisCacheWriter { private final RedisConnectionFactory connectionFactory; private final Duration sleepTime; private final CacheStatisticsCollector statistics; + private final BatchStrategy batchStrategy; /** * @param connectionFactory must not be {@literal null}. 
+ * @param batchStrategy must not be {@literal null}. */ - DefaultRedisCacheWriter(RedisConnectionFactory connectionFactory) { - this(connectionFactory, Duration.ZERO); + DefaultRedisCacheWriter(RedisConnectionFactory connectionFactory, BatchStrategy batchStrategy) { + this(connectionFactory, Duration.ZERO, batchStrategy); } /** * @param connectionFactory must not be {@literal null}. * @param sleepTime sleep time between lock request attempts. Must not be {@literal null}. Use {@link Duration#ZERO} * to disable locking. + * @param batchStrategy must not be {@literal null}. */ - DefaultRedisCacheWriter(RedisConnectionFactory connectionFactory, Duration sleepTime) { - this(connectionFactory, sleepTime, CacheStatisticsCollector.none()); + DefaultRedisCacheWriter(RedisConnectionFactory connectionFactory, Duration sleepTime, BatchStrategy batchStrategy) { + this(connectionFactory, sleepTime, CacheStatisticsCollector.none(), batchStrategy); } /** @@ -75,17 +76,20 @@ class DefaultRedisCacheWriter implements RedisCacheWriter { * @param sleepTime sleep time between lock request attempts. Must not be {@literal null}. Use {@link Duration#ZERO} * to disable locking. * @param cacheStatisticsCollector must not be {@literal null}. + * @param batchStrategy must not be {@literal null}. */ DefaultRedisCacheWriter(RedisConnectionFactory connectionFactory, Duration sleepTime, - CacheStatisticsCollector cacheStatisticsCollector) { + CacheStatisticsCollector cacheStatisticsCollector, BatchStrategy batchStrategy) { Assert.notNull(connectionFactory, "ConnectionFactory must not be null!"); Assert.notNull(sleepTime, "SleepTime must not be null!"); Assert.notNull(cacheStatisticsCollector, "CacheStatisticsCollector must not be null!"); + Assert.notNull(batchStrategy, "BatchStrategy must not be null!"); this.connectionFactory = connectionFactory; this.sleepTime = sleepTime; this.statistics = cacheStatisticsCollector; + this.batchStrategy = batchStrategy; } /* @@ -213,13 +217,9 @@ public void clean(String name, byte[] pattern) { wasLocked = true; } - byte[][] keys = Optional.ofNullable(connection.keys(pattern)).orElse(Collections.emptySet()) - .toArray(new byte[0][]); - if (keys.length > 0) { - statistics.incDeletesBy(name, keys.length); - connection.del(keys); - } + statistics.incDeletesBy(name, batchStrategy.cleanCache(connection, name, pattern)); + } finally { if (wasLocked && isLockingCacheWriter()) { @@ -255,7 +255,7 @@ public void clearStatistics(String name) { */ @Override public RedisCacheWriter withStatisticsCollector(CacheStatisticsCollector cacheStatisticsCollector) { - return new DefaultRedisCacheWriter(connectionFactory, sleepTime, cacheStatisticsCollector); + return new DefaultRedisCacheWriter(connectionFactory, sleepTime, cacheStatisticsCollector, this.batchStrategy); } /** diff --git a/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java b/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java index 5b6718e353..bd764f5d26 100644 --- a/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java +++ b/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java @@ -186,7 +186,7 @@ public static RedisCacheManager create(RedisConnectionFactory connectionFactory) Assert.notNull(connectionFactory, "ConnectionFactory must not be null!"); - return new RedisCacheManager(new DefaultRedisCacheWriter(connectionFactory), + return new RedisCacheManager(RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), 
RedisCacheConfiguration.defaultCacheConfig()); } @@ -311,7 +311,7 @@ public static RedisCacheManagerBuilder fromConnectionFactory(RedisConnectionFact Assert.notNull(connectionFactory, "ConnectionFactory must not be null!"); - return new RedisCacheManagerBuilder(new DefaultRedisCacheWriter(connectionFactory)); + return new RedisCacheManagerBuilder(RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory)); } /** diff --git a/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java b/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java index 8d5d135cf4..abe121ee89 100644 --- a/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java +++ b/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java @@ -26,6 +26,9 @@ * caching.
* The {@link RedisCacheWriter} may be shared by multiple cache implementations and is responsible for writing / reading * binary data to / from Redis. The implementation honors potential cache lock flags that might be set. + *

+ * The default {@link RedisCacheWriter} implementation can be customized with {@link BatchStrategy} to tune performance + * behavior. * * @author Christoph Strobl * @author Mark Paluch @@ -40,10 +43,24 @@ public interface RedisCacheWriter extends CacheStatisticsProvider { * @return new instance of {@link DefaultRedisCacheWriter}. */ static RedisCacheWriter nonLockingRedisCacheWriter(RedisConnectionFactory connectionFactory) { + return nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.keys()); + } + + /** + * Create new {@link RedisCacheWriter} without locking behavior. + * + * @param connectionFactory must not be {@literal null}. + * @param batchStrategy must not be {@literal null}. + * @return new instance of {@link DefaultRedisCacheWriter}. + * @since 2.6 + */ + static RedisCacheWriter nonLockingRedisCacheWriter(RedisConnectionFactory connectionFactory, + BatchStrategy batchStrategy) { Assert.notNull(connectionFactory, "ConnectionFactory must not be null!"); + Assert.notNull(batchStrategy, "BatchStrategy must not be null!"); - return new DefaultRedisCacheWriter(connectionFactory); + return new DefaultRedisCacheWriter(connectionFactory, batchStrategy); } /** @@ -53,10 +70,23 @@ static RedisCacheWriter nonLockingRedisCacheWriter(RedisConnectionFactory connec * @return new instance of {@link DefaultRedisCacheWriter}. */ static RedisCacheWriter lockingRedisCacheWriter(RedisConnectionFactory connectionFactory) { + return lockingRedisCacheWriter(connectionFactory, BatchStrategy.keys()); + } + + /** + * Create new {@link RedisCacheWriter} with locking behavior. + * + * @param connectionFactory must not be {@literal null}. + * @param batchStrategy must not be {@literal null}. + * @return new instance of {@link DefaultRedisCacheWriter}. + * @since 2.6 + */ + static RedisCacheWriter lockingRedisCacheWriter(RedisConnectionFactory connectionFactory, + BatchStrategy batchStrategy) { Assert.notNull(connectionFactory, "ConnectionFactory must not be null!"); - return new DefaultRedisCacheWriter(connectionFactory, Duration.ofMillis(50)); + return new DefaultRedisCacheWriter(connectionFactory, Duration.ofMillis(50), batchStrategy); } /** diff --git a/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java b/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java index 8310bdb459..fa3536f282 100644 --- a/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java +++ b/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java @@ -306,7 +306,8 @@ void lockingCacheWriterShouldExitWhenInterruptedWaitForLockRelease() throws Inte Thread th = new Thread(() -> { - DefaultRedisCacheWriter writer = new DefaultRedisCacheWriter(connectionFactory, Duration.ofMillis(50)) { + DefaultRedisCacheWriter writer = new DefaultRedisCacheWriter(connectionFactory, Duration.ofMillis(50), + BatchStrategy.keys()) { @Override boolean doCheckLock(String name, RedisConnection connection) { diff --git a/src/test/java/org/springframework/data/redis/cache/LegacyRedisCacheTests.java b/src/test/java/org/springframework/data/redis/cache/LegacyRedisCacheTests.java index 5dd9707125..46f93bb5d1 100644 --- a/src/test/java/org/springframework/data/redis/cache/LegacyRedisCacheTests.java +++ b/src/test/java/org/springframework/data/redis/cache/LegacyRedisCacheTests.java @@ -102,7 +102,8 @@ private RedisCache createCache() { cacheConfiguration = cacheConfiguration.disableCachingNullValues(); } - return new RedisCache(CACHE_NAME, 
new DefaultRedisCacheWriter(connectionFactory), cacheConfiguration); + return new RedisCache(CACHE_NAME, RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), + cacheConfiguration); } protected Object getValue() { diff --git a/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java b/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java index 3e5fce8a74..a32d7c4bf4 100644 --- a/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java +++ b/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java @@ -16,6 +16,7 @@ package org.springframework.data.redis.cache; import static org.assertj.core.api.Assertions.*; +import static org.assertj.core.api.Assumptions.*; import lombok.AllArgsConstructor; import lombok.Data; @@ -37,6 +38,7 @@ import org.springframework.cache.support.NullValue; import org.springframework.data.redis.connection.RedisConnection; import org.springframework.data.redis.connection.RedisConnectionFactory; +import org.springframework.data.redis.connection.jedis.JedisConnectionFactory; import org.springframework.data.redis.serializer.RedisSerializationContext.SerializationPair; import org.springframework.data.redis.serializer.RedisSerializer; import org.springframework.data.redis.test.extension.parametrized.MethodSource; @@ -81,7 +83,7 @@ void setUp() { doWithConnection(RedisConnection::flushAll); - cache = new RedisCache("cache", new DefaultRedisCacheWriter(connectionFactory), + cache = new RedisCache("cache", RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer))); } @@ -251,6 +253,33 @@ void clearShouldClearCache() { }); } + @ParameterizedRedisTest // GH-1721 + void clearWithScanShouldClearCache() { + + // SCAN not supported via Jedis Cluster. 
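+		// The guard below skips this test for clustered Jedis connections.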
+ if (connectionFactory instanceof JedisConnectionFactory) { + assumeThat(((JedisConnectionFactory) connectionFactory).isRedisClusterAware()).isFalse(); + } + + RedisCache cache = new RedisCache("cache", + RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.scan(25)), + RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer))); + + doWithConnection(connection -> { + connection.set(binaryCacheKey, binaryNullValue); + connection.set("cache::foo".getBytes(), binaryNullValue); + connection.set("other".getBytes(), "value".getBytes()); + }); + + cache.clear(); + + doWithConnection(connection -> { + assertThat(connection.exists(binaryCacheKey)).isFalse(); + assertThat(connection.exists("cache::foo".getBytes())).isFalse(); + assertThat(connection.exists("other".getBytes())).isTrue(); + }); + } + @ParameterizedRedisTest // DATAREDIS-481 void getWithCallableShouldResolveValueIfNotPresent() { @@ -280,7 +309,8 @@ void getWithCallableShouldNotResolveValueIfPresent() { @ParameterizedRedisTest // DATAREDIS-715 void computePrefixCreatesCacheKeyCorrectly() { - RedisCache cacheWithCustomPrefix = new RedisCache("cache", new DefaultRedisCacheWriter(connectionFactory), + RedisCache cacheWithCustomPrefix = new RedisCache("cache", + RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer)) .computePrefixWith(cacheName -> "_" + cacheName + "_")); @@ -296,7 +326,8 @@ void computePrefixCreatesCacheKeyCorrectly() { @ParameterizedRedisTest // DATAREDIS-1041 void prefixCacheNameCreatesCacheKeyCorrectly() { - RedisCache cacheWithCustomPrefix = new RedisCache("cache", new DefaultRedisCacheWriter(connectionFactory), + RedisCache cacheWithCustomPrefix = new RedisCache("cache", + RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer)) .prefixCacheNameWith("redis::")); @@ -314,7 +345,8 @@ void fetchKeyWithComputedPrefixReturnsExpectedResult() { doWithConnection(connection -> connection.set("_cache_key-1".getBytes(StandardCharsets.UTF_8), binarySample)); - RedisCache cacheWithCustomPrefix = new RedisCache("cache", new DefaultRedisCacheWriter(connectionFactory), + RedisCache cacheWithCustomPrefix = new RedisCache("cache", + RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory), RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer)) .computePrefixWith(cacheName -> "_" + cacheName + "_")); From f1aa5a6e5be92adf6fe91272358c680f00069068 Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 21 Apr 2021 11:07:42 +0200 Subject: [PATCH 4/6] Documentation. 
--- src/main/asciidoc/reference/redis-cache.adoc | 128 ++++++++++++++++++ src/main/asciidoc/reference/redis.adoc | 119 +--------------- .../data/redis/cache/BatchStrategy.java | 4 +- .../data/redis/cache/RedisCacheManager.java | 2 + 4 files changed, 134 insertions(+), 119 deletions(-) create mode 100644 src/main/asciidoc/reference/redis-cache.adoc diff --git a/src/main/asciidoc/reference/redis-cache.adoc b/src/main/asciidoc/reference/redis-cache.adoc new file mode 100644 index 0000000000..522b5881bd --- /dev/null +++ b/src/main/asciidoc/reference/redis-cache.adoc @@ -0,0 +1,128 @@ +[[redis:support:cache-abstraction]] +== Redis Cache + +NOTE: Changed in 2.0 + +Spring Redis provides an implementation for the Spring https://docs.spring.io/spring/docs/{springVersion}/spring-framework-reference/integration.html#cache[cache abstraction] through the `org.springframework.data.redis.cache` package. To use Redis as a backing implementation, add `RedisCacheManager` to your configuration, as follows: + +[source,java] +---- +@Bean +public RedisCacheManager cacheManager(RedisConnectionFactory connectionFactory) { + return RedisCacheManager.create(connectionFactory); +} +---- + +`RedisCacheManager` behavior can be configured with `RedisCacheManagerBuilder`, letting you set the default `RedisCacheConfiguration`, transaction behavior, and predefined caches. + +[source,java] +---- +RedisCacheManager cm = RedisCacheManager.builder(connectionFactory) + .cacheDefaults(defaultCacheConfig()) + .withInitialCacheConfigurations(singletonMap("predefined", defaultCacheConfig().disableCachingNullValues())) + .transactionAware() + .build(); +---- + +As shown in the preceding example, `RedisCacheManager` allows definition of configurations on a per-cache basis. + +The behavior of `RedisCache` created with `RedisCacheManager` is defined with `RedisCacheConfiguration`. The configuration lets you set key expiration times, prefixes, and ``RedisSerializer`` implementations for converting to and from the binary storage format, as shown in the following example: + +[source,java] +---- +RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig() + .entryTtl(Duration.ofSeconds(1)) + .disableCachingNullValues(); +---- + +`RedisCacheManager` defaults to a lock-free `RedisCacheWriter` for reading and writing binary values. Lock-free caching improves throughput. The lack of entry locking can lead to overlapping, non-atomic commands for the `putIfAbsent` and `clean` methods, as those require multiple commands to be sent to Redis. The locking counterpart prevents command overlap by setting an explicit lock key and checking against presence of this key, which leads to additional requests and potential command wait times. + +It is possible to opt in to the locking behavior as follows: + +[source,java] +---- +RedisCacheManager cm = RedisCacheManager.build(RedisCacheWriter.lockingRedisCacheWriter(connectionFactory)) + .cacheDefaults(defaultCacheConfig()) + ... +---- + +By default, any `key` for a cache entry gets prefixed with the actual cache name followed by two colons. +This behavior can be changed to a static as well as a computed prefix. 
+ +The following example shows how to set a static prefix: + +[source,java] +---- +// static key prefix +RedisCacheConfiguration.defaultCacheConfig().prefixKeysWith("( ͡° ᴥ ͡°)"); + +The following example shows how to set a computed prefix: + +// computed key prefix +RedisCacheConfiguration.defaultCacheConfig().computePrefixWith(cacheName -> "¯\_(ツ)_/¯" + cacheName); +---- + +The cache implementation defaults to use `KEYS` and `DEL` to clear the cache. `KEYS` can cause performance issues with large keyspaces. Therefore, the default `RedisCacheWriter` can be created with a `BatchStrategy` to switch to a `SCAN`-based batch strategy. The `SCAN` strategy requires a batch size to avoid excessive Redis command roundtrips: + +[source,java] +---- +RedisCacheManager cm = RedisCacheManager.build(RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.scan(1000))) + .cacheDefaults(defaultCacheConfig()) + ... +---- + +NOTE: The `KEYS` batch strategy is fully supported using any driver and Redis operation mode (Standalone, Clustered). `SCAN` is fully supported when using the Lettuce driver. Jedis supports `SCAN` only in non-clustered modes. + +The following table lists the default settings for `RedisCacheManager`: + +.`RedisCacheManager` defaults +[width="80%",cols="<1,<2",options="header"] +|==== +|Setting +|Value + +|Cache Writer +|Non-locking, `KEYS` batch strategy + +|Cache Configuration +|`RedisCacheConfiguration#defaultConfiguration` + +|Initial Caches +|None + +|Transaction Aware +|No +|==== + +The following table lists the default settings for `RedisCacheConfiguration`: + +.RedisCacheConfiguration defaults +[width="80%",cols="<1,<2",options="header"] +|==== +|Key Expiration +|None + +|Cache `null` +|Yes + +|Prefix Keys +|Yes + +|Default Prefix +|The actual cache name + +|Key Serializer +|`StringRedisSerializer` + +|Value Serializer +|`JdkSerializationRedisSerializer` + +|Conversion Service +|`DefaultFormattingConversionService` with default cache key converters +|==== + +[NOTE] +==== +By default `RedisCache`, statistics are disabled. +Use `RedisCacheManagerBuilder.enableStatistics()` to collect local _hits_ and _misses_ through `RedisCache#getStatistics()`, returning a snapshot of the collected data. +==== diff --git a/src/main/asciidoc/reference/redis.adoc b/src/main/asciidoc/reference/redis.adoc index 58961879b1..691e101a67 100644 --- a/src/main/asciidoc/reference/redis.adoc +++ b/src/main/asciidoc/reference/redis.adoc @@ -652,6 +652,8 @@ include::{referenceDir}/pipelining.adoc[] include::{referenceDir}/redis-scripting.adoc[] +include::{referenceDir}/redis-cache.adoc[] + :leveloffset: 1 [[redis:support]] == Support Classes @@ -693,120 +695,3 @@ public class AnotherExample { As shown in the preceding example, the consuming code is decoupled from the actual storage implementation. In fact, there is no indication that Redis is used underneath. This makes moving from development to production environments transparent and highly increases testability (the Redis implementation can be replaced with an in-memory one). -[[redis:support:cache-abstraction]] -=== Support for the Spring Cache Abstraction - -NOTE: Changed in 2.0 - -Spring Redis provides an implementation for the Spring https://docs.spring.io/spring/docs/{springVersion}/spring-framework-reference/integration.html#cache[cache abstraction] through the `org.springframework.data.redis.cache` package. 
To use Redis as a backing implementation, add `RedisCacheManager` to your configuration, as follows: - -[source,java] ----- -@Bean -public RedisCacheManager cacheManager(RedisConnectionFactory connectionFactory) { - return RedisCacheManager.create(connectionFactory); -} ----- - -`RedisCacheManager` behavior can be configured with `RedisCacheManagerBuilder`, letting you set the default `RedisCacheConfiguration`, transaction behavior, and predefined caches. - -[source,java] ----- -RedisCacheManager cm = RedisCacheManager.builder(connectionFactory) - .cacheDefaults(defaultCacheConfig()) - .withInitialCacheConfigurations(singletonMap("predefined", defaultCacheConfig().disableCachingNullValues())) - .transactionAware() - .build(); ----- - -As shown in the preceding example, `RedisCacheManager` allows definition of configurations on a per-cache basis. - -The behavior of `RedisCache` created with `RedisCacheManager` is defined with `RedisCacheConfiguration`. The configuration lets you set key expiration times, prefixes, and ``RedisSerializer`` implementations for converting to and from the binary storage format, as shown in the following example: - -[source,java] ----- -RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig() - .entryTtl(Duration.ofSeconds(1)) - .disableCachingNullValues(); ----- - -`RedisCacheManager` defaults to a lock-free `RedisCacheWriter` for reading and writing binary values. Lock-free caching improves throughput. The lack of entry locking can lead to overlapping, non-atomic commands for the `putIfAbsent` and `clean` methods, as those require multiple commands to be sent to Redis. The locking counterpart prevents command overlap by setting an explicit lock key and checking against presence of this key, which leads to additional requests and potential command wait times. - -It is possible to opt in to the locking behavior as follows: - -[source,java] ----- -RedisCacheManager cm = RedisCacheManager.build(RedisCacheWriter.lockingRedisCacheWriter()) - .cacheDefaults(defaultCacheConfig()) - ... ----- - -By default, any `key` for a cache entry gets prefixed with the actual cache name followed by two colons. -This behavior can be changed to a static as well as a computed prefix. - -The following example shows how to set a static prefix: - -[source,java] ----- -// static key prefix -RedisCacheConfiguration.defaultCacheConfig().prefixKeysWith("( ͡° ᴥ ͡°)"); - -The following example shows how to set a computed prefix: - -// computed key prefix -RedisCacheConfiguration.defaultCacheConfig().computePrefixWith(cacheName -> "¯\_(ツ)_/¯" + cacheName); ----- - -The following table lists the default settings for `RedisCacheManager`: - -.`RedisCacheManager` defaults -[width="80%",cols="<1,<2",options="header"] -|==== -|Setting -|Value - -|Cache Writer -|Non-locking - -|Cache Configuration -|`RedisCacheConfiguration#defaultConfiguration` - -|Initial Caches -|None - -|Transaction Aware -|No -|==== - -The following table lists the default settings for `RedisCacheConfiguration`: - -.RedisCacheConfiguration defaults -[width="80%",cols="<1,<2",options="header"] -|==== -|Key Expiration -|None - -|Cache `null` -|Yes - -|Prefix Keys -|Yes - -|Default Prefix -|The actual cache name - -|Key Serializer -|`StringRedisSerializer` - -|Value Serializer -|`JdkSerializationRedisSerializer` - -|Conversion Service -|`DefaultFormattingConversionService` with default cache key converters -|==== - -[NOTE] -==== -By default `RedisCache`, statistics are disabled. 
-Use `RedisCacheManagerBuilder.enableStatistics()` to collect local _hits_ and _misses_ through `RedisCache#getStatistics()`, returning a snapshot of the collected data. -==== diff --git a/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java index 5bf25537e7..9c41f6c803 100644 --- a/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java +++ b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java @@ -39,8 +39,8 @@ public abstract class BatchStrategy { /** * Batching strategy using a single {@code KEYS} and {@code DEL} command to remove all matching keys. {@code KEYS} - * scans the entire keyspace of the Redis database and can block the Redis worker thread for a long time when the - * keyspace has a significant size. + * scans the entire keyspace of the Redis database and can block the Redis worker thread for a long time on large + * keyspaces. *

 * {@code KEYS} is supported for standalone and clustered (sharded) Redis operation modes.
 *
diff --git a/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java b/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java
index bd764f5d26..766e83138f 100644
--- a/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java
+++ b/src/main/java/org/springframework/data/redis/cache/RedisCacheManager.java
@@ -169,6 +169,8 @@ public RedisCacheManager(RedisCacheWriter cacheWriter, RedisCacheConfiguration d
 * <dl>
 * <dt>locking</dt>
 * <dd>disabled</dd>
+ * <dt>batch strategy</dt>
+ * <dd>{@link BatchStrategy#keys() KEYS}</dd>
 * <dt>cache configuration</dt>
 * <dd>{@link RedisCacheConfiguration#defaultCacheConfig()}</dd>
 * <dt>initial caches</dt>
From 0a27cf3ce837d79b69f70fc7916636fcc033b8da Mon Sep 17 00:00:00 2001 From: Mark Paluch Date: Wed, 21 Apr 2021 11:16:41 +0200 Subject: [PATCH 5/6] Mention RedisCache locking behavior details. Closes #1801. --- src/main/asciidoc/reference/redis-cache.adoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/main/asciidoc/reference/redis-cache.adoc b/src/main/asciidoc/reference/redis-cache.adoc index 522b5881bd..bd2c5d22f3 100644 --- a/src/main/asciidoc/reference/redis-cache.adoc +++ b/src/main/asciidoc/reference/redis-cache.adoc @@ -35,7 +35,11 @@ RedisCacheConfiguration config = RedisCacheConfiguration.defaultCacheConfig() .disableCachingNullValues(); ---- -`RedisCacheManager` defaults to a lock-free `RedisCacheWriter` for reading and writing binary values. Lock-free caching improves throughput. The lack of entry locking can lead to overlapping, non-atomic commands for the `putIfAbsent` and `clean` methods, as those require multiple commands to be sent to Redis. The locking counterpart prevents command overlap by setting an explicit lock key and checking against presence of this key, which leads to additional requests and potential command wait times. +`RedisCacheManager` defaults to a lock-free `RedisCacheWriter` for reading and writing binary values. +Lock-free caching improves throughput. +The lack of entry locking can lead to overlapping, non-atomic commands for the `putIfAbsent` and `clean` methods, as those require multiple commands to be sent to Redis. The locking counterpart prevents command overlap by setting an explicit lock key and checking against presence of this key, which leads to additional requests and potential command wait times. + +Locking applies on the *cache level*, not per *cache entry*. It is possible to opt in to the locking behavior as follows: From 68cef9a1da23ceec63b95139bbb0126ad4dd304f Mon Sep 17 00:00:00 2001 From: Christoph Strobl Date: Tue, 18 May 2021 10:11:25 +0200 Subject: [PATCH 6/6] Transition BatchStrategy to interface. ...and use long return type instead of int for cleanCache. --- src/main/asciidoc/reference/redis-cache.adoc | 4 +- .../data/redis/cache/BatchStrategies.java | 158 ++++++++++++++++++ .../data/redis/cache/BatchStrategy.java | 146 ++-------------- .../redis/cache/DefaultRedisCacheWriter.java | 8 +- .../data/redis/cache/RedisCacheWriter.java | 4 +- .../cache/DefaultRedisCacheWriterTests.java | 2 +- .../data/redis/cache/RedisCacheTests.java | 2 +- 7 files changed, 180 insertions(+), 144 deletions(-) create mode 100644 src/main/java/org/springframework/data/redis/cache/BatchStrategies.java diff --git a/src/main/asciidoc/reference/redis-cache.adoc b/src/main/asciidoc/reference/redis-cache.adoc index bd2c5d22f3..c06c975e80 100644 --- a/src/main/asciidoc/reference/redis-cache.adoc +++ b/src/main/asciidoc/reference/redis-cache.adoc @@ -26,7 +26,7 @@ RedisCacheManager cm = RedisCacheManager.builder(connectionFactory) As shown in the preceding example, `RedisCacheManager` allows definition of configurations on a per-cache basis. -The behavior of `RedisCache` created with `RedisCacheManager` is defined with `RedisCacheConfiguration`. The configuration lets you set key expiration times, prefixes, and ``RedisSerializer`` implementations for converting to and from the binary storage format, as shown in the following example: +The behavior of `RedisCache` created with `RedisCacheManager` is defined with `RedisCacheConfiguration`. 
The configuration lets you set key expiration times, prefixes, and `RedisSerializer` implementations for converting to and from the binary storage format, as shown in the following example: [source,java] ---- @@ -70,7 +70,7 @@ The cache implementation defaults to use `KEYS` and `DEL` to clear the cache. `K [source,java] ---- -RedisCacheManager cm = RedisCacheManager.build(RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.scan(1000))) +RedisCacheManager cm = RedisCacheManager.build(RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategies.scan(1000))) .cacheDefaults(defaultCacheConfig()) ... ---- diff --git a/src/main/java/org/springframework/data/redis/cache/BatchStrategies.java b/src/main/java/org/springframework/data/redis/cache/BatchStrategies.java new file mode 100644 index 0000000000..381896ec7c --- /dev/null +++ b/src/main/java/org/springframework/data/redis/cache/BatchStrategies.java @@ -0,0 +1,158 @@ +/* + * Copyright 2021 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.redis.cache; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Optional; + +import org.springframework.data.redis.connection.RedisConnection; +import org.springframework.data.redis.core.Cursor; +import org.springframework.data.redis.core.ScanOptions; +import org.springframework.util.Assert; + +/** + * A collection of predefined {@link BatchStrategy} implementations using {@code KEYS} or {@code SCAN} command. + * + * @author Mark Paluch + * @author Christoph Strobl + * @since 2.6 + */ +public abstract class BatchStrategies { + + /** + * A {@link BatchStrategy} using a single {@code KEYS} and {@code DEL} command to remove all matching keys. + * {@code KEYS} scans the entire keyspace of the Redis database and can block the Redis worker thread for a long time + * on large keyspaces. + *

+ * {@code KEYS} is supported for standalone and clustered (sharded) Redis operation modes. + * + * @return batching strategy using {@code KEYS}. + */ + public static BatchStrategy keys() { + return Keys.INSTANCE; + } + + /** + * A {@link BatchStrategy} using a {@code SCAN} cursors and potentially multiple {@code DEL} commands to remove all + * matching keys. This strategy allows a configurable batch size to optimize for scan batching. + *

+ * Note that using the {@code SCAN} strategy might be not supported on all drivers and Redis operation modes. + * + * @return batching strategy using {@code SCAN}. + */ + public static BatchStrategy scan(int batchSize) { + + Assert.isTrue(batchSize > 0, "Batch size must be greater than zero!"); + + return new Scan(batchSize); + } + + /** + * {@link BatchStrategy} using {@code KEYS}. + */ + static class Keys implements BatchStrategy { + + static Keys INSTANCE = new Keys(); + + @Override + public long cleanCache(RedisConnection connection, String name, byte[] pattern) { + + byte[][] keys = Optional.ofNullable(connection.keys(pattern)).orElse(Collections.emptySet()) + .toArray(new byte[0][]); + + if (keys.length > 0) { + connection.del(keys); + } + + return keys.length; + } + } + + /** + * {@link BatchStrategy} using {@code SCAN}. + */ + static class Scan implements BatchStrategy { + + private final int batchSize; + + Scan(int batchSize) { + this.batchSize = batchSize; + } + + @Override + public long cleanCache(RedisConnection connection, String name, byte[] pattern) { + + Cursor cursor = connection.scan(ScanOptions.scanOptions().count(batchSize).match(pattern).build()); + + long count = 0; + + PartitionIterator partitions = new PartitionIterator<>(cursor, batchSize); + while (partitions.hasNext()) { + + List keys = partitions.next(); + count += keys.size(); + + if (keys.size() > 0) { + connection.del(keys.toArray(new byte[0][])); + } + } + + return count; + } + } + + /** + * Utility to split and buffer outcome from a {@link Iterator} into {@link List lists} of {@code T} with a maximum + * chunks {@code size}. + * + * @param + */ + static class PartitionIterator implements Iterator> { + + private final Iterator iterator; + private final int size; + + PartitionIterator(Iterator iterator, int size) { + + this.iterator = iterator; + this.size = size; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public List next() { + + if (!hasNext()) { + throw new NoSuchElementException(); + } + + List list = new ArrayList<>(size); + while (list.size() < size && iterator.hasNext()) { + list.add(iterator.next()); + } + + return list; + } + } +} diff --git a/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java index 9c41f6c803..bb9fd46d1d 100644 --- a/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java +++ b/src/main/java/org/springframework/data/redis/cache/BatchStrategy.java @@ -15,156 +15,30 @@ */ package org.springframework.data.redis.cache; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Optional; - import org.springframework.data.redis.connection.RedisConnection; -import org.springframework.data.redis.core.Cursor; -import org.springframework.data.redis.core.ScanOptions; -import org.springframework.util.Assert; /** - * Batch strategies to be used with {@link RedisCacheWriter}. + * A {@link BatchStrategy} to be used with {@link RedisCacheWriter}. + *

+ * Mainly used to clear the cache.
 * <p/>
- * Primarily used to clear the cache. + * Predefined strategies using the {@link BatchStrategies#keys() KEYS} or {@link BatchStrategies#scan(int) SCAN} + * commands can be found in {@link BatchStrategies}. * * @author Mark Paluch + * @author Christoph Strobl * @since 2.6 */ -public abstract class BatchStrategy { - - /** - * Batching strategy using a single {@code KEYS} and {@code DEL} command to remove all matching keys. {@code KEYS} - * scans the entire keyspace of the Redis database and can block the Redis worker thread for a long time on large - * keyspaces. - *

- * {@code KEYS} is supported for standalone and clustered (sharded) Redis operation modes. - * - * @return batching strategy using {@code KEYS}. - */ - public static BatchStrategy keys() { - return Keys.INSTANCE; - } - - /** - * Batching strategy using a {@code SCAN} cursors and potentially multiple {@code DEL} commands to remove all matching - * keys. This strategy allows a configurable batch size to optimize for scan batching. - *

- * Note that using the {@code SCAN} strategy might be not supported on all drivers and Redis operation modes. - * - * @return batching strategy using {@code SCAN}. - */ - public static BatchStrategy scan(int batchSize) { - - Assert.isTrue(batchSize > 0, "Batch size must be greater than zero"); - - return new Scan(batchSize); - } +public interface BatchStrategy { /** * Remove all keys following the given pattern. * - * @param the connection to use. - * @param name The cache name must not be {@literal null}. + * @param connection the connection to use. Must not be {@literal null}. + * @param name The cache name. Must not be {@literal null}. * @param pattern The pattern for the keys to remove. Must not be {@literal null}. * @return number of removed keys. */ - abstract int cleanCache(RedisConnection connection, String name, byte[] pattern); - - /** - * {@link BatchStrategy} using {@code KEYS}. - */ - static class Keys extends BatchStrategy { - - static Keys INSTANCE = new Keys(); - - @Override - int cleanCache(RedisConnection connection, String name, byte[] pattern) { - - byte[][] keys = Optional.ofNullable(connection.keys(pattern)).orElse(Collections.emptySet()) - .toArray(new byte[0][]); - - if (keys.length > 0) { - connection.del(keys); - } - - return keys.length; - } - } - - /** - * {@link BatchStrategy} using {@code SCAN}. - */ - static class Scan extends BatchStrategy { - - private final int batchSize; - - public Scan(int batchSize) { - this.batchSize = batchSize; - } - - @Override - int cleanCache(RedisConnection connection, String name, byte[] pattern) { - - Cursor cursor = connection.scan(ScanOptions.scanOptions().count(batchSize).match(pattern).build()); - - PartitionIterator partitions = new PartitionIterator<>(cursor, batchSize); - - int count = 0; - - while (partitions.hasNext()) { - - List keys = partitions.next(); - count += keys.size(); - - if (keys.size() > 0) { - connection.del(keys.toArray(new byte[0][])); - } - } - - return count; - } - } - - /** - * Utility to split and buffer outcome from a {@link Iterator} into {@link List lists} of {@code T} with a maximum - * chunks {@code size}. 
- * - * @param - */ - static class PartitionIterator implements Iterator> { - - private final Iterator iterator; - private final int size; - - public PartitionIterator(Iterator iterator, int size) { - this.iterator = iterator; - this.size = size; - } - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public List next() { - - if (!hasNext()) { - throw new NoSuchElementException(); - } - - List list = new ArrayList<>(size); - while (list.size() < size && iterator.hasNext()) { - list.add(iterator.next()); - } - - return list; - } - } + long cleanCache(RedisConnection connection, String name, byte[] pattern); } diff --git a/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java b/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java index 6cfe7b950b..8afa0f8d76 100644 --- a/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java +++ b/src/main/java/org/springframework/data/redis/cache/DefaultRedisCacheWriter.java @@ -217,8 +217,12 @@ public void clean(String name, byte[] pattern) { wasLocked = true; } - - statistics.incDeletesBy(name, batchStrategy.cleanCache(connection, name, pattern)); + long deleteCount = batchStrategy.cleanCache(connection, name, pattern); + while (deleteCount > Integer.MAX_VALUE) { + statistics.incDeletesBy(name, Integer.MAX_VALUE); + deleteCount -= Integer.MAX_VALUE; + } + statistics.incDeletesBy(name, (int) deleteCount); } finally { diff --git a/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java b/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java index abe121ee89..0210c89124 100644 --- a/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java +++ b/src/main/java/org/springframework/data/redis/cache/RedisCacheWriter.java @@ -43,7 +43,7 @@ public interface RedisCacheWriter extends CacheStatisticsProvider { * @return new instance of {@link DefaultRedisCacheWriter}. */ static RedisCacheWriter nonLockingRedisCacheWriter(RedisConnectionFactory connectionFactory) { - return nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.keys()); + return nonLockingRedisCacheWriter(connectionFactory, BatchStrategies.keys()); } /** @@ -70,7 +70,7 @@ static RedisCacheWriter nonLockingRedisCacheWriter(RedisConnectionFactory connec * @return new instance of {@link DefaultRedisCacheWriter}. 
*/ static RedisCacheWriter lockingRedisCacheWriter(RedisConnectionFactory connectionFactory) { - return lockingRedisCacheWriter(connectionFactory, BatchStrategy.keys()); + return lockingRedisCacheWriter(connectionFactory, BatchStrategies.keys()); } /** diff --git a/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java b/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java index fa3536f282..f66bab744f 100644 --- a/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java +++ b/src/test/java/org/springframework/data/redis/cache/DefaultRedisCacheWriterTests.java @@ -307,7 +307,7 @@ void lockingCacheWriterShouldExitWhenInterruptedWaitForLockRelease() throws Inte Thread th = new Thread(() -> { DefaultRedisCacheWriter writer = new DefaultRedisCacheWriter(connectionFactory, Duration.ofMillis(50), - BatchStrategy.keys()) { + BatchStrategies.keys()) { @Override boolean doCheckLock(String name, RedisConnection connection) { diff --git a/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java b/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java index a32d7c4bf4..ef29b13782 100644 --- a/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java +++ b/src/test/java/org/springframework/data/redis/cache/RedisCacheTests.java @@ -262,7 +262,7 @@ void clearWithScanShouldClearCache() { } RedisCache cache = new RedisCache("cache", - RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategy.scan(25)), + RedisCacheWriter.nonLockingRedisCacheWriter(connectionFactory, BatchStrategies.scan(25)), RedisCacheConfiguration.defaultCacheConfig().serializeValuesWith(SerializationPair.fromSerializer(serializer))); doWithConnection(connection -> {